import os
import sys
import logging
import boto3
import pandas as pd
from datetime import datetime
import urllib.request
from dataactbroker.helpers.pandas_helper import check_dataframe_diff
from dataactbroker.helpers.uri_helper import RetrieveFileFromUri
from dataactcore.logging import configure_logging
from dataactcore.interfaces.db import GlobalDB
from dataactcore.interfaces.function_bag import update_external_data_load_date
from dataactcore.config import CONFIG_BROKER
from dataactcore.models.domainModels import CityCode, CountyCode, States, ZipCity
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import insert_dataframe, trim_item
logger = logging.getLogger(__name__)
def clean_data(data, field_map):
""" Clean up the data by removing columns that aren't relevant and renaming the remaining ones to match what we
need.
Args:
data: data to clean
field_map: mapping of all relevant columns
"""
# toss out any columns from the csv that aren't in the field_map parameter
data = data[list(field_map.keys())]
# rename columns as specified in field_map
data = data.rename(columns=field_map)
# trim all columns
data = data.applymap(lambda x: trim_item(x) if len(str(x).strip()) else None)
return data
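# Illustration only (hypothetical data, not part of the loader): keep just the mapped
# columns, rename them, and trim values; `trim_item` (from loader_utils) is assumed to
# strip surrounding whitespace, and fully blank values become None.
#   df = pd.DataFrame({"STATE_ALPHA": [" VA "], "UNUSED": ["x"]})
#   clean_data(df, {"STATE_ALPHA": "state_code"})
#   #    state_code
#   # 0          VA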
def parse_city_file(city_file):
""" Parse the City file and insert all relevant rows into the database.
Args:
city_file: path/url to file to gather City data from
Returns:
The data in a pandas object, cleaned and sorted for insertion into the DB
"""
# read the data and clean up the column names
data = pd.read_csv(city_file, dtype=str, sep="|")
data = clean_data(
data,
{"FEATURE_NAME": "feature_name",
"FEATURE_CLASS": "feature_class",
"CENSUS_CODE": "city_code",
"STATE_ALPHA": "state_code",
"COUNTY_NUMERIC": "county_number",
"COUNTY_NAME": "county_name",
"PRIMARY_LATITUDE": "latitude",
"PRIMARY_LONGITUDE": "longitude"})
# add a sort column based on feature_class and remove anything with a different feature class or empty city_code
feature_class_ranking = {"Populated Place": 1, "Locale": 2, "Civil": 3, "Census": 4}
data = data[pd.notnull(data['city_code'])]
data['sorting_col'] = data['feature_class'].map(feature_class_ranking)
data = data[pd.notnull(data['sorting_col'])]
# sort by feature_class then remove any duplicates within state/city code combo (we keep the first occurrence
# because we've sorted by priority so the one that would overwrite the others is on top already)
data = data.sort_values(by=['sorting_col'])
data = data[~data.duplicated(subset=['state_code', 'city_code'], keep='first')]
data = data.drop('sorting_col', axis=1)
# add created_at and updated_at columns
now = datetime.utcnow()
data = data.assign(created_at=now, updated_at=now)
# sort by feature_name to restore the original ordering
data = data.sort_values(by=['feature_name'])
return data
def parse_county_file(county_file):
""" Parse the County file and insert all relevant rows into the database.
Args:
county_file: path/url to file to gather County data from
Returns:
The data in a pandas object, cleaned and sorted for insertion into the DB
"""
# read the data and clean up the column names
data = pd.read_csv(county_file, dtype=str, sep="|")
data = clean_data(
data,
{"COUNTY_NUMERIC": "county_number",
"COUNTY_NAME": "county_name",
"STATE_ALPHA": "state_code"})
# remove all rows with a blank county_number; they're of no use in a county code table
data = data[pd.notnull(data['county_number'])]
# remove duplicates because we have no use for them (there may be none, this is a precaution)
data = data[~data.duplicated(subset=['county_number', 'state_code'], keep='first')]
# add created_at and updated_at columns
now = datetime.utcnow()
data = data.assign(created_at=now, updated_at=now)
return data
def parse_state_file(state_file):
""" Parse the State file and insert all relevant rows into the database.
Args:
state_file: path/url to file to gather State data from
Returns:
The data in a pandas object, cleaned and sorted for insertion into the DB
"""
# read the data. Cleaning is included in case the format changes; right now it's effectively a no-op
data = pd.read_csv(state_file, dtype=str)
data = clean_data(
data,
{"state_name": "state_name",
"state_code": "state_code",
"fips_code": "fips_code"})
# add created_at and updated_at columns
now = datetime.utcnow()
data = data.assign(created_at=now, updated_at=now)
return data
def parse_zip_city_file(f):
""" Parse the ZipCity file and insert all relevant rows into the database.
Args:
f: file to process
Returns:
The data in a pandas object, cleaned and sorted for insertion into the DB
"""
line_size = 129
chunk_size = 1024 * 10
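# Fixed-width record layout assumed by the parsing below (inferred from the slicing):
#   position 0       -> record type ("D" marks a detail record)
#   positions 1-5    -> 5-digit ZIP code
#   positions 62-89  -> city name (space padded)
# Each record is line_size (129) characters; the first record is read and skipped below.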
f.read(line_size)
data_dict = {}
curr_chunk = ""
while True:
# grab the next chunk
next_chunk = f.read(chunk_size)
# when streaming from S3 it reads in as bytes, we need to decode it as a utf-8 string
if not isinstance(next_chunk, str):
next_chunk = next_chunk.decode("utf-8")
# add the new chunk of the file to the current chunk we're processing
curr_chunk += next_chunk
# if the current chunk is smaller than the line size, we're done
if len(curr_chunk) < line_size:
break
# while we can still do more processing on the current chunk, process it per line
while len(curr_chunk) >= line_size:
# grab another line and get the data if it's a "detail record"
curr_row = curr_chunk[:line_size]
if curr_row[0] == "D":
zip_code = curr_row[1:6]
city_name = curr_row[62:90].strip()
data_dict[zip_code] = {"zip_code": zip_code, "city_name": city_name}
# cut the current line out of the chunk we're processing
curr_chunk = curr_chunk[line_size:]
data = pd.DataFrame([[item['zip_code'], item['city_name']] for _, item in data_dict.items()],
columns=['zip_code', 'city_name'])
# add created_at and updated_at columns
now = datetime.utcnow()
data = data.assign(created_at=now, updated_at=now)
return data
def load_city_data(force_reload):
""" Load data into the CityCode table
Args:
force_reload: boolean to determine if reload should happen whether there are differences or not
"""
start_time = datetime.now()
# parse the new city code data
city_file_url = '{}/NationalFedCodes.txt'.format(CONFIG_BROKER['usas_public_reference_url'])
with RetrieveFileFromUri(city_file_url, 'r').get_file_object() as city_file:
new_data = parse_city_file(city_file)
diff_found = check_dataframe_diff(new_data, CityCode, ['city_code_id'], ['state_code', 'city_code'])
if force_reload or diff_found:
sess = GlobalDB.db().session
logger.info('Differences found or reload forced, reloading city_code table.')
# delete any data in the CityCode table
sess.query(CityCode).delete()
# insert data into table
num = insert_dataframe(new_data, CityCode.__table__.name, sess.connection())
logger.info('{} records inserted to city_code'.format(num))
sess.commit()
update_external_data_load_date(start_time, datetime.now(), 'city')
else:
logger.info('No differences found, skipping city_code table reload.')
def load_county_data(force_reload):
""" Load data into the CountyCode table
Args:
force_reload: boolean to determine if reload should happen whether there are differences or not
"""
start_time = datetime.now()
county_file_url = '{}/GOVT_UNITS.txt'.format(CONFIG_BROKER['usas_public_reference_url'])
with RetrieveFileFromUri(county_file_url, 'r').get_file_object() as county_file:
new_data = parse_county_file(county_file)
diff_found = check_dataframe_diff(new_data, CountyCode, ['county_code_id'], ['county_number', 'state_code'])
if force_reload or diff_found:
sess = GlobalDB.db().session
logger.info('Differences found or reload forced, reloading county_code table.')
# delete any data in the CountyCode table
sess.query(CountyCode).delete()
# insert data into table
num = insert_dataframe(new_data, CountyCode.__table__.name, sess.connection())
logger.info('{} records inserted to county_code'.format(num))
sess.commit()
update_external_data_load_date(start_time, datetime.now(), 'county_code')
else:
logger.info('No differences found, skipping county_code table reload.')
def load_state_data(force_reload):
""" Load data into the States table
Args:
force_reload: boolean to determine if reload should happen whether there are differences or not
"""
start_time = datetime.now()
state_file_url = '{}/state_list.csv'.format(CONFIG_BROKER['usas_public_reference_url'])
with RetrieveFileFromUri(state_file_url, 'r').get_file_object() as state_file:
new_data = parse_state_file(state_file)
diff_found = check_dataframe_diff(new_data, States, ['states_id'], ['state_code'])
if force_reload or diff_found:
sess = GlobalDB.db().session
logger.info('Differences found or reload forced, reloading states table.')
# delete any data in the States table
sess.query(States).delete()
# insert data into table
num = insert_dataframe(new_data, States.__table__.name, sess.connection())
logger.info('{} records inserted to states'.format(num))
sess.commit()
update_external_data_load_date(start_time, datetime.now(), 'state_code')
else:
logger.info('No differences found, skipping states table reload.')
def load_zip_city_data(force_reload):
""" Load data into the ZipCity table
Args:
force_reload: boolean to determine if reload should happen whether there are differences or not
"""
if CONFIG_BROKER["use_aws"]:
s3_client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
citystate_file = s3_client.generate_presigned_url('get_object', {'Bucket': CONFIG_BROKER['sf_133_bucket'],
'Key': "ctystate.txt"}, ExpiresIn=600)
zip_city_file = urllib.request.urlopen(citystate_file)
else:
citystate_file = os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config", "ctystate.txt")
zip_city_file = open(citystate_file)
new_data = parse_zip_city_file(zip_city_file)
diff_found = check_dataframe_diff(new_data, ZipCity, ['zip_city_id'], ['zip_code'])
if force_reload or diff_found:
sess = GlobalDB.db().session
logger.info('Differences found or reload forced, reloading zip_city table.')
# delete any data in the ZipCity table
sess.query(ZipCity).delete()
# insert data into table
num = insert_dataframe(new_data, ZipCity.__table__.name, sess.connection())
logger.info('{} records inserted to zip_city'.format(num))
sess.commit()
else:
logger.info('No differences found, skipping zip_city table reload.')
def load_location_data(force_reload=False):
""" Loads the city, county, state, citystate, and zipcity data.
Args:
force_reload: reloads the tables even if there are no differences found in data
"""
with create_app().app_context():
logger.info('Loading city data')
load_city_data(force_reload)
logger.info('Loading county data')
load_county_data(force_reload)
logger.info('Loading state data')
load_state_data(force_reload)
logger.info('Loading zip city data')
load_zip_city_data(force_reload)
if __name__ == '__main__':
configure_logging()
reload = '--force' in sys.argv
load_location_data(reload)
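# Example invocation (illustrative; the script filename is assumed):
#   python load_location_data.py --force
# reloads all four tables even when no differences are detected.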
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Independent distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import kullback_leibler
class Independent(distribution_lib.Distribution):
"""Independent distribution from batch of distributions.
This distribution is useful for regarding a collection of independent,
non-identical distributions as a single random variable. For example, the
`Independent` distribution composed of a collection of `Bernoulli`
distributions might define a distribution over an image (where each
`Bernoulli` is a distribution over each pixel).
More precisely, a collection of `B` (independent) `E`-variate random variables
(rv) `{X_1, ..., X_B}` can be regarded as a `[B, E]`-variate random variable
`(X_1, ..., X_B)` with probability
`p(x_1, ..., x_B) = p_1(x_1) * ... * p_B(x_B)` where `p_b(X_b)` is the
probability of the `b`-th rv. More generally `B, E` can be arbitrary shapes.
Similarly, the `Independent` distribution specifies a distribution over `[B,
E]`-shaped events. It operates by reinterpreting the rightmost batch dims as
part of the event dimensions. The `reinterpreted_batch_ndims` parameter
controls the number of batch dims which are absorbed as event dims;
`reinterpreted_batch_ndims < len(batch_shape)`. For example, the `log_prob`
function entails a `reduce_sum` over the rightmost `reinterpreted_batch_ndims`
after calling the base distribution's `log_prob`. In other words, since the
batch dimension(s) index independent distributions, the resultant multivariate
will have independent components.
#### Mathematical Details
The probability function is,
```none
prob(x; reinterpreted_batch_ndims) = tf.reduce_prod(
dist.prob(x),
axis=-1-range(reinterpreted_batch_ndims))
```
#### Examples
```python
tfd = tf.contrib.distributions
# Make independent distribution from a 2-batch Normal.
ind = tfd.Independent(
distribution=tfd.Normal(loc=[-1., 1], scale=[0.1, 0.5]),
reinterpreted_batch_ndims=1)
# All batch dims have been "absorbed" into event dims.
ind.batch_shape # ==> []
ind.event_shape # ==> [2]
# Make independent distribution from a 2-batch bivariate Normal.
ind = tfd.Independent(
distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]],
scale_identity_multiplier=[1., 0.5]),
reinterpreted_batch_ndims=1)
# All batch dims have been "absorbed" into event dims.
ind.batch_shape # ==> []
ind.event_shape # ==> [2, 2]
```
"""
def __init__(
self, distribution, reinterpreted_batch_ndims=None,
validate_args=False, name=None):
"""Construct a `Independent` distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
reinterpreted_batch_ndims: Scalar, integer number of rightmost batch dims
which will be regarded as event dims. When `None` all but the first
batch axis (batch axis 0) will be transferred to event dimensions
(analogous to `tf.layers.flatten`).
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: The name for ops managed by the distribution.
Default value: `Independent + distribution.name`.
Raises:
ValueError: if `reinterpreted_batch_ndims` exceeds
`distribution.batch_ndims`
"""
parameters = locals()
name = name or "Independent" + distribution.name
self._distribution = distribution
with ops.name_scope(name):
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = self._get_default_reinterpreted_batch_ndims(
distribution)
reinterpreted_batch_ndims = ops.convert_to_tensor(
reinterpreted_batch_ndims,
dtype=dtypes.int32,
name="reinterpreted_batch_ndims")
self._reinterpreted_batch_ndims = reinterpreted_batch_ndims
self._static_reinterpreted_batch_ndims = tensor_util.constant_value(
reinterpreted_batch_ndims)
if self._static_reinterpreted_batch_ndims is not None:
self._reinterpreted_batch_ndims = self._static_reinterpreted_batch_ndims
super(Independent, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
graph_parents=(
[reinterpreted_batch_ndims] +
distribution._graph_parents), # pylint: disable=protected-access
name=name)
self._runtime_assertions = self._make_runtime_assertions(
distribution, reinterpreted_batch_ndims, validate_args)
@property
def distribution(self):
return self._distribution
@property
def reinterpreted_batch_ndims(self):
return self._reinterpreted_batch_ndims
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
batch_shape = self.distribution.batch_shape_tensor()
batch_ndims = (batch_shape.shape[0].value
if batch_shape.shape.with_rank_at_least(1)[0].value
else array_ops.shape(batch_shape)[0])
return batch_shape[:batch_ndims - self.reinterpreted_batch_ndims]
def _batch_shape(self):
batch_shape = self.distribution.batch_shape
if (self._static_reinterpreted_batch_ndims is None
or batch_shape.ndims is None):
return tensor_shape.TensorShape(None)
d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
return batch_shape[:d]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
batch_shape = self.distribution.batch_shape_tensor()
batch_ndims = (batch_shape.shape[0].value
if batch_shape.shape.with_rank_at_least(1)[0].value
else array_ops.shape(batch_shape)[0])
return array_ops.concat([
batch_shape[batch_ndims - self.reinterpreted_batch_ndims:],
self.distribution.event_shape_tensor(),
], axis=0)
def _event_shape(self):
batch_shape = self.distribution.batch_shape
if (self._static_reinterpreted_batch_ndims is None
or batch_shape.ndims is None):
return tensor_shape.TensorShape(None)
d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
return batch_shape[d:].concatenate(self.distribution.event_shape)
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.sample(sample_shape=n, seed=seed)
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
return self._reduce_sum(self.distribution.log_prob(x))
def _entropy(self):
with ops.control_dependencies(self._runtime_assertions):
return self._reduce_sum(self.distribution.entropy())
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.mean()
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.variance()
def _stddev(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.stddev()
def _mode(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.mode()
def _make_runtime_assertions(
self, distribution, reinterpreted_batch_ndims, validate_args):
assertions = []
static_reinterpreted_batch_ndims = tensor_util.constant_value(
reinterpreted_batch_ndims)
batch_ndims = distribution.batch_shape.ndims
if batch_ndims is not None and static_reinterpreted_batch_ndims is not None:
if static_reinterpreted_batch_ndims > batch_ndims:
raise ValueError("reinterpreted_batch_ndims({}) cannot exceed "
"distribution.batch_ndims({})".format(
static_reinterpreted_batch_ndims, batch_ndims))
elif validate_args:
batch_shape = distribution.batch_shape_tensor()
batch_ndims = (
batch_shape.shape[0].value
if batch_shape.shape.with_rank_at_least(1)[0].value is not None
else array_ops.shape(batch_shape)[0])
assertions.append(check_ops.assert_less_equal(
reinterpreted_batch_ndims, batch_ndims,
message=("reinterpreted_batch_ndims cannot exceed "
"distribution.batch_ndims")))
return assertions
def _reduce_sum(self, stat):
if self._static_reinterpreted_batch_ndims is None:
range_ = math_ops.range(self._reinterpreted_batch_ndims)
else:
range_ = np.arange(self._static_reinterpreted_batch_ndims)
return math_ops.reduce_sum(stat, axis=-1-range_)
def _get_default_reinterpreted_batch_ndims(self, distribution):
"""Computes the default value for reinterpreted_batch_ndim __init__ arg."""
ndims = distribution.batch_shape.ndims
if ndims is None:
which_maximum = math_ops.maximum
ndims = array_ops.shape(distribution.batch_shape_tensor())[0]
else:
which_maximum = np.maximum
return which_maximum(0, ndims - 1)
@kullback_leibler.RegisterKL(Independent, Independent)
def _kl_independent(a, b, name="kl_independent"):
"""Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match.
"""
p = a.distribution
q = b.distribution
# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
# KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if a.event_shape.is_fully_defined() and b.event_shape.is_fully_defined():
if a.event_shape == b.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = a.event_shape.ndims - p.event_shape.ndims
reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
return math_ops.reduce_sum(
kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
else:
raise NotImplementedError("KL between Independents with different "
"event shapes not supported.")
else:
raise ValueError("Event shapes do not match.")
else:
with ops.control_dependencies([
check_ops.assert_equal(a.event_shape_tensor(), b.event_shape_tensor()),
check_ops.assert_equal(p.event_shape_tensor(), q.event_shape_tensor())
]):
num_reduce_dims = (
array_ops.shape(a.event_shape_tensor())[0] -
array_ops.shape(p.event_shape_tensor())[0])
reduce_dims = math_ops.range(-num_reduce_dims, 0, 1)
return math_ops.reduce_sum(
kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
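# Minimal usage sketch (illustrative, not part of the module): with the registration
# above, `kl_divergence` between two `Independent` wrappers dispatches to
# `_kl_independent` and sums the component KLs over the reinterpreted batch dims.
#   tfd = tf.contrib.distributions
#   a = tfd.Independent(tfd.Normal(loc=[0., 0.], scale=[1., 1.]),
#                       reinterpreted_batch_ndims=1)
#   b = tfd.Independent(tfd.Normal(loc=[1., 1.], scale=[1., 1.]),
#                       reinterpreted_batch_ndims=1)
#   kl = tfd.kl_divergence(a, b)  # scalar: sum of the two per-component Normal KLs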
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import uuid
from xml.dom import minidom
from eventlet import greenthread
from lxml import etree
import mock
from mox3 import mox
from oslo_concurrency.fixture import lockutils as lock_fixture
from nova.compute import utils as compute_utils
from nova import exception
from nova.network import linux_net
from nova import objects
from nova import test
from nova.tests.unit import fake_network
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import firewall
from nova.virt.libvirt import host
from nova.virt import netutils
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
class NWFilterFakes(object):
def __init__(self):
self.filters = {}
def nwfilterLookupByName(self, name):
if name in self.filters:
return self.filters[name]
raise fakelibvirt.libvirtError('Filter Not Found')
def filterDefineXMLMock(self, xml):
class FakeNWFilterInternal(object):
def __init__(self, parent, name, u, xml):
self.name = name
self.uuid = u
self.parent = parent
self.xml = xml
def XMLDesc(self, flags):
return self.xml
def undefine(self):
del self.parent.filters[self.name]
tree = etree.fromstring(xml)
name = tree.get('name')
u = tree.find('uuid')
if u is None:
u = uuid.uuid4().hex
else:
u = u.text
if name not in self.filters:
self.filters[name] = FakeNWFilterInternal(self, name, u, xml)
else:
if self.filters[name].uuid != u:
raise fakelibvirt.libvirtError(
"Mismatching name '%s' with uuid '%s' vs '%s'"
% (name, self.filters[name].uuid, u))
self.filters[name].xml = xml
return True
class IptablesFirewallTestCase(test.NoDBTestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.useFixture(lock_fixture.ExternalLockFixture())
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.fw = firewall.IptablesFirewallDriver(
host=host.Host("qemu:///system"))
in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 2012',
'*mangle',
':PREROUTING ACCEPT [241:39722]',
':INPUT ACCEPT [230:39282]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [266:26558]',
':POSTROUTING ACCEPT [267:26590]',
'-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill',
'COMMIT',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def _create_instance_ref(self,
uuid="74526555-9166-4893-a203-126bdcab0d67"):
inst = objects.Instance(
id=7,
uuid=uuid,
user_id="fake",
project_id="fake",
image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
instance_type_id=1)
inst.info_cache = objects.InstanceInfoCache()
inst.info_cache.deleted = False
return inst
@mock.patch.object(objects.InstanceList, "get_by_security_group_id")
@mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance")
def test_static_filters(self, mock_secrule, mock_instlist):
UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146"
SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a"
instance_ref = self._create_instance_ref(UUID)
src_instance_ref = self._create_instance_ref(SRC_UUID)
secgroup = objects.SecurityGroup(id=1,
user_id='fake',
project_id='fake',
name='testgroup',
description='test group')
src_secgroup = objects.SecurityGroup(id=2,
user_id='fake',
project_id='fake',
name='testsourcegroup',
description='src group')
r1 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
protocol='icmp',
from_port=-1,
to_port=-1,
cidr='192.168.11.0/24',
grantee_group=None)
r2 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
protocol='icmp',
from_port=8,
to_port=-1,
cidr='192.168.11.0/24',
grantee_group=None)
r3 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
protocol='tcp',
from_port=80,
to_port=81,
cidr='192.168.10.0/24',
grantee_group=None)
r4 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
protocol='tcp',
from_port=80,
to_port=81,
cidr=None,
grantee_group=src_secgroup,
group_id=src_secgroup['id'])
r5 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
protocol=None,
cidr=None,
grantee_group=src_secgroup,
group_id=src_secgroup['id'])
secgroup_list = objects.SecurityGroupList()
secgroup_list.objects.append(secgroup)
src_secgroup_list = objects.SecurityGroupList()
src_secgroup_list.objects.append(src_secgroup)
instance_ref.security_groups = secgroup_list
src_instance_ref.security_groups = src_secgroup_list
mock_secrule.return_value = objects.SecurityGroupRuleList(
objects=[r1, r2, r3, r4, r5])
def _fake_instlist(ctxt, id):
if id == src_secgroup['id']:
insts = objects.InstanceList()
insts.objects.append(src_instance_ref)
return insts
else:
insts = objects.InstanceList()
insts.objects.append(instance_ref)
return insts
mock_instlist.side_effect = _fake_instlist
def fake_iptables_execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
if cmd == ('ip6tables-save', '-c'):
return '\n'.join(self.in6_filter_rules), None
if cmd == ('iptables-save', '-c'):
return '\n'.join(self.in_rules), None
if cmd == ('iptables-restore', '-c'):
lines = process_input.split('\n')
if '*filter' in lines:
self.out_rules = lines
return '', ''
if cmd == ('ip6tables-restore', '-c',):
lines = process_input.split('\n')
if '*filter' in lines:
self.out6_rules = lines
return '', ''
network_model = _fake_network_info(self, 1)
linux_net.iptables_manager.execute = fake_iptables_execute
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
self.fw.prepare_instance_filter(instance_ref, network_model)
self.fw.apply_instance_filter(instance_ref, network_model)
in_rules = filter(lambda l: not l.startswith('#'),
self.in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertIn(rule, self.out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
'-s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
'--icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
'--dports 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
'%s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"Protocol/port-less acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
'-m multiport --dports 80:81 -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = _fake_network_info(self, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = _fake_network_info(self, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 0)
@mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance")
def test_multinic_iptables(self, mock_secrule):
mock_secrule.return_value = objects.SecurityGroupRuleList()
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
instance_ref.security_groups = objects.SecurityGroupList()
network_info = _fake_network_info(self, networks_count,
ipv4_addr_per_network)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEqual(ipv4_network_rules, rules)
self.assertEqual(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw,
'instance_rules')
self.mox.StubOutWithMock(self.fw,
'add_filters_for_instance',
use_mock_anything=True)
self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'],
'has_chain')
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg()
).AndReturn(True)
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
self.fw.instance_info[instance_ref['id']] = (instance_ref, None)
self.fw.do_refresh_security_group_rules("fake")
def test_do_refresh_security_group_rules_instance_gone(self):
instance1 = objects.Instance(None, id=1, uuid='fake-uuid1')
instance2 = objects.Instance(None, id=2, uuid='fake-uuid2')
self.fw.instance_info = {1: (instance1, 'netinfo1'),
2: (instance2, 'netinfo2')}
mock_filter = mock.MagicMock()
with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}):
mock_filter.has_chain.return_value = False
with mock.patch.object(self.fw, 'instance_rules') as mock_ir:
mock_ir.return_value = (None, None)
self.fw.do_refresh_security_group_rules('secgroup')
self.assertEqual(2, mock_ir.call_count)
# NOTE(danms): Make sure that it is checking has_chain each time,
# continuing to process all the instances, and never adding the
# new chains back if has_chain() is False
mock_filter.has_chain.assert_has_calls([mock.call('inst-1'),
mock.call('inst-2')],
any_order=True)
self.assertEqual(0, mock_filter.add_chain.call_count)
@mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
@mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
@mock.patch.object(objects.InstanceList, "get_by_security_group_id")
@mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance")
def test_unfilter_instance_undefines_nwfilter(self,
mock_secrule,
mock_instlist,
mock_define,
mock_lookup):
fakefilter = NWFilterFakes()
mock_lookup.side_effect = fakefilter.nwfilterLookupByName
mock_define.side_effect = fakefilter.filterDefineXMLMock
instance_ref = self._create_instance_ref()
instance_ref.security_groups = objects.SecurityGroupList()
mock_secrule.return_value = objects.SecurityGroupRuleList()
network_info = _fake_network_info(self, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance_ref, network_info)
# should undefine just the instance filter
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
@mock.patch.object(firewall, 'libvirt', fakelibvirt)
class NWFilterTestCase(test.NoDBTestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.fw = firewall.NWFilterFirewall(host=host.Host("qemu:///system"))
def _create_security_group(self, instance_ref):
secgroup = objects.SecurityGroup(id=1,
user_id='fake',
project_id='fake',
name='testgroup',
description='test group description')
secgroup_list = objects.SecurityGroupList()
secgroup_list.objects.append(secgroup)
instance_ref.security_groups = secgroup_list
return secgroup
def _create_instance(self):
inst = objects.Instance(
id=7,
uuid="74526555-9166-4893-a203-126bdcab0d67",
user_id="fake",
project_id="fake",
image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
instance_type_id=1)
inst.info_cache = objects.InstanceInfoCache()
inst.info_cache.deleted = False
return inst
@mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
def test_creates_base_rule_first(self, mock_define):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']
self.recursive_depends = {}
for f in self.defined_filters:
self.recursive_depends[f] = []
def fake_define(xml):
dom = minidom.parseString(xml)
name = dom.firstChild.getAttribute('name')
self.recursive_depends[name] = []
for f in dom.getElementsByTagName('filterref'):
ref = f.getAttribute('filter')
self.assertIn(ref, self.defined_filters,
('%s referenced filter that does ' +
'not yet exist: %s') % (name, ref))
dependencies = [ref] + self.recursive_depends[ref]
self.recursive_depends[name] += dependencies
self.defined_filters.append(name)
return True
mock_define.side_effect = fake_define
instance_ref = self._create_instance()
self._create_security_group(instance_ref)
def _ensure_all_called(mac, allow_dhcp):
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
mac.translate({ord(':'): None}))
requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
'no-mac-spoofing']
required_not_list = []
if allow_dhcp:
requiredlist.append('allow-dhcp-server')
else:
required_not_list.append('allow-dhcp-server')
for required in requiredlist:
self.assertIn(required,
self.recursive_depends[instance_filter],
"Instance's filter does not include %s" %
required)
for required_not in required_not_list:
self.assertNotIn(required_not,
self.recursive_depends[instance_filter],
"Instance filter includes %s" % required_not)
network_info = _fake_network_info(self, 1)
# since there is one (network_info) there is one vif
# pass this vif's mac to _ensure_all_called()
# to set the instance_filter properly
mac = network_info[0]['address']
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
self.fw.setup_basic_filtering(instance_ref, network_info)
allow_dhcp = True
_ensure_all_called(mac, allow_dhcp)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None
self.fw.setup_basic_filtering(instance_ref, network_info)
allow_dhcp = False
_ensure_all_called(mac, allow_dhcp)
@mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
@mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
def test_unfilter_instance_undefines_nwfilters(self,
mock_define,
mock_lookup):
fakefilter = NWFilterFakes()
mock_lookup.side_effect = fakefilter.nwfilterLookupByName
mock_define.side_effect = fakefilter.filterDefineXMLMock
instance_ref = self._create_instance()
self._create_security_group(instance_ref)
network_info = _fake_network_info(self, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance_ref, network_info)
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
@mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
@mock.patch.object(greenthread, 'sleep')
def test_unfilter_instance_retry_and_error(self, mock_sleep, mock_lookup):
# Tests that we try to undefine the network filter when it's in use
# until we hit a timeout. We try two times and sleep once in between.
self.flags(live_migration_retry_count=2)
in_use = fakelibvirt.libvirtError('nwfilter is in use')
in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,)
mock_undefine = mock.Mock(side_effect=in_use)
fakefilter = mock.MagicMock(undefine=mock_undefine)
mock_lookup.return_value = fakefilter
instance_ref = self._create_instance()
network_info = _fake_network_info(self, 1)
self.assertRaises(fakelibvirt.libvirtError, self.fw.unfilter_instance,
instance_ref, network_info)
self.assertEqual(2, mock_lookup.call_count)
self.assertEqual(2, mock_undefine.call_count)
mock_sleep.assert_called_once_with(1)
@mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
@mock.patch.object(greenthread, 'sleep')
def test_unfilter_instance_retry_not_found(self, mock_sleep, mock_lookup):
# Tests that we exit if the nw filter is not found.
in_use = fakelibvirt.libvirtError('nwfilter is in use')
in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,)
not_found = fakelibvirt.libvirtError('no nwfilter with matching name')
not_found.err = (fakelibvirt.VIR_ERR_NO_NWFILTER,)
mock_undefine = mock.Mock(side_effect=(in_use, not_found))
fakefilter = mock.MagicMock(undefine=mock_undefine)
mock_lookup.return_value = fakefilter
instance_ref = self._create_instance()
network_info = _fake_network_info(self, 1)
self.fw.unfilter_instance(instance_ref, network_info)
self.assertEqual(2, mock_lookup.call_count)
self.assertEqual(2, mock_undefine.call_count)
mock_sleep.assert_called_once_with(1)
@mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
@mock.patch.object(greenthread, 'sleep')
def test_unfilter_instance_retry_and_pass(self, mock_sleep, mock_lookup):
# Tests that we retry on in-use error but pass if undefine() works
# while looping.
in_use = fakelibvirt.libvirtError('nwfilter is in use')
in_use.err = (fakelibvirt.VIR_ERR_OPERATION_INVALID,)
mock_undefine = mock.Mock(side_effect=(in_use, None))
fakefilter = mock.MagicMock(undefine=mock_undefine)
mock_lookup.return_value = fakefilter
instance_ref = self._create_instance()
network_info = _fake_network_info(self, 1)
self.fw.unfilter_instance(instance_ref, network_info)
self.assertEqual(2, mock_lookup.call_count)
self.assertEqual(2, mock_undefine.call_count)
mock_sleep.assert_called_once_with(1)
def test_redefining_nwfilters(self):
fakefilter = NWFilterFakes()
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
instance_ref = self._create_instance()
self._create_security_group(instance_ref)
network_info = _fake_network_info(self, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.fw.setup_basic_filtering(instance_ref, network_info)
@mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
@mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
def test_nwfilter_parameters(self,
mock_define,
mock_lookup):
fakefilter = NWFilterFakes()
mock_lookup.side_effect = fakefilter.nwfilterLookupByName
mock_define.side_effect = fakefilter.filterDefineXMLMock
instance_ref = self._create_instance()
self._create_security_group(instance_ref)
network_info = _fake_network_info(self, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
vif = network_info[0]
nic_id = vif['address'].replace(':', '')
instance_filter_name = self.fw._instance_filter_name(instance_ref,
nic_id)
f = fakefilter.nwfilterLookupByName(instance_filter_name)
tree = etree.fromstring(f.xml)
for fref in tree.findall('filterref'):
parameters = fref.findall('./parameter')
for parameter in parameters:
subnet_v4, subnet_v6 = vif['network']['subnets']
if parameter.get('name') == 'IP':
self.assertTrue(_ipv4_like(parameter.get('value'),
'192.168'))
elif parameter.get('name') == 'DHCPSERVER':
dhcp_server = subnet_v4.get('dhcp_server')
self.assertEqual(parameter.get('value'), dhcp_server)
elif parameter.get('name') == 'RASERVER':
ra_server = subnet_v6['gateway']['address'] + "/128"
self.assertEqual(parameter.get('value'), ra_server)
elif parameter.get('name') == 'PROJNET':
ipv4_cidr = subnet_v4['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK':
ipv4_cidr = subnet_v4['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), mask)
elif parameter.get('name') == 'PROJNET6':
ipv6_cidr = subnet_v6['cidr']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK6':
ipv6_cidr = subnet_v6['cidr']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), prefix)
else:
raise exception.InvalidParameterValue('unknown parameter '
'in filter')
@mock.patch.object(fakelibvirt.virConnect, "nwfilterLookupByName")
@mock.patch.object(fakelibvirt.virConnect, "nwfilterDefineXML")
def test_multinic_base_filter_selection(self,
mock_define,
mock_lookup):
fakefilter = NWFilterFakes()
mock_lookup.side_effect = fakefilter.nwfilterLookupByName
mock_define.side_effect = fakefilter.filterDefineXMLMock
instance_ref = self._create_instance()
self._create_security_group(instance_ref)
network_info = _fake_network_info(self, 2)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
self.fw.setup_basic_filtering(instance_ref, network_info)
def assert_filterref(instance, vif, expected=None):
expected = expected or []
nic_id = vif['address'].replace(':', '')
filter_name = self.fw._instance_filter_name(instance, nic_id)
f = fakefilter.nwfilterLookupByName(filter_name)
tree = etree.fromstring(f.xml)
frefs = [fr.get('filter') for fr in tree.findall('filterref')]
self.assertEqual(set(expected), set(frefs))
assert_filterref(instance_ref, network_info[0],
expected=['nova-base'])
assert_filterref(instance_ref, network_info[1],
expected=['nova-nodhcp'])
@mock.patch.object(firewall.LOG, 'debug')
def test_get_filter_uuid_unicode_exception_logging(self, debug):
with mock.patch.object(self.fw._conn, 'nwfilterLookupByName') as look:
look.side_effect = fakelibvirt.libvirtError(u"\U0001F4A9")
self.fw._get_filter_uuid('test')
self.assertEqual(2, debug.call_count)
self.assertEqual(u"Cannot find UUID for filter '%(name)s': '%(e)s'",
debug.call_args_list[0][0][0])
## DESIGN
## This script takes in a URL pointing to the main page of a comic/manga/manhwa on Batoto.net.
## It then parses the chapter links, visits each one, downloads the images for each chapter and
## compresses them into zip files.
## Alternatively, we may be able to specify a name and have the script search for it and present the results.
## All choices should be one click; the software is deferential to the user.
## Let's build the script block by block, so we keep the option of adding a GUI later.
## FUNCTIONS
## 1.  Parse a URL to get the chapter links
## 1A. Confirm that a URL points to the main page of a comic
## 2.  Given a chapter link, identify all the pages
## 2A. Retrieve the volume number, chapter number and title of the chapter, including support for
##     fractional and negative chapter numbers
## 3.  Download the pages and compress them
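## Example invocations (illustrative; the script filename is hypothetical, the flags come
## from the argparse setup near the bottom of this file):
##   python mangadl.py                           # prompt for a single series URL
##   python mangadl.py -u <series-main-page-url> # update the series at that URL
##   python mangadl.py -r -v                     # re-scan folders that contain mangadl.link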
import re
import urllib2
from lxml import html
from lxml.etree import tostring
from StringIO import StringIO
import gzip
import shutil
import os
import os.path
import glob
import sys
import zipfile
import urlparse
import argparse
import unicodedata
from string import maketrans
## Constants
__DOWNLOAD__ = True
__DEBUG__ = False
__RETRY_URL__ = 5
## Function to compress a directory
def zipdir(path, zipf):
for root, dirs, files in os.walk(path):
for f in files:
zipf.write(os.path.join(root, f), arcname = os.path.basename(f))
## Function to identify which supported site a URL belongs to
def checkURLType(url_input):
print "Checking: " + url_input
url_ok = False
for url_type in URL_TYPES:
if re.compile(URL_TYPES[url_type]['url']).match(url_input):
print "Site supported: " + url_type
url_ok = True
break
if not url_ok:
print "URL not supported or unknown"
exit(1)
return url_type
## Function to get a webpage
def readURL(url):
if url[0] == '/':
url = url_type + url
if __DEBUG__:
print "Reading url: " + url
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
for i in range(__RETRY_URL__):
try:
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip': # Large pages are often gzipped
buf = StringIO(response.read())
data = gzip.GzipFile(fileobj = buf)
else:
data = response
return data
except Exception: # transient fetch error; retry
pass
## Function to retrieve and parse an HTML page
def readHTML(url):
data = readURL(url)
page = html.parse(data)
return page
## Function to download an image from a direct URL
def downloadImage(img_url, file_path):
data = readURL(img_url)
with open(file_path, 'wb') as f:
f.write(data.read())
## Function to clean the path from problematic characters.
def cleanPath(pathString):
if isinstance(pathString, unicode):
pathString = unicodedata.normalize('NFKD', pathString).encode('ascii', 'ignore')
pathString = pathString.translate(None, '\/:*?<>|')
transTable = maketrans("\"", "\'")
pathString = pathString.translate(transTable)
return pathString
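## Illustration only: cleanPath(u'Vol. 1: "A/B?"') returns "Vol. 1 'AB'" -- reserved
## filesystem characters are dropped and double quotes become single quotes.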
## Generic class representing a manga chapter
class MangaChapter(object):
def __init__(self, manga_name, chapter_number, chapter_url, chapter_root_path, chapter_title=None, volume_number=None, group_name=None):
self.chapter_number = chapter_number
self.volume_number = volume_number
self.chapter_title = chapter_title
self.chapter_url = chapter_url
self.group_name = group_name
self.chapter_root_path = chapter_root_path
self.page_list = []
self.page_num = 0
prefix = [manga_name]
if volume_number is not None:
prefix.append("Volume " + volume_number)
prefix.append("Chapter " + chapter_number)
if chapter_title is not None:
prefix.append("- " + chapter_title)
if group_name is not None:
prefix.append("[" + group_name + "]")
self.prefix = " ".join(prefix)
def show(self):
print "Vol: ", self.volume_number, " Ch: ", self.chapter_number, " - ", self.chapter_title, ", by: ", self.group_name
def addPage(self, page_url):
self.page_list.append(page_url)
def retrieveAllPages(self):
raise NotImplementedError # To be overridden in subclasses
def downloadPage(self, page_url, page_file_path):
raise NotImplementedError # To be overridden in subclasses
def downloadChapter(self):
pathA = cleanPath(self.chapter_root_path)
pathB = cleanPath(self.prefix)
dir_path = os.path.join(pathA, pathB)
if verbose:
print ""
print dir_path
zip_path = dir_path + ".zip"
if os.path.exists(zip_path):
zipf = zipfile.ZipFile(zip_path)
if zipf.testzip() is None:
if __DEBUG__:
print "Skipping chapter " + self.chapter_number
return
else:
os.remove(zip_path)
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
os.makedirs(dir_path)
self.retrieveAllPages()
for page_url in self.page_list:
self.page_num = self.page_num + 1
page_path = os.path.join(dir_path, "p" + str(self.page_num).zfill(3))
self.downloadPage(page_url, page_path)
zipf = zipfile.ZipFile(zip_path, "w")
zipdir(dir_path, zipf)
zipf.close()
shutil.rmtree(dir_path)
## Subclass representing a manga chapter from Batoto
class MangaChapterBatoto(MangaChapter):
def __init__(self, manga_name, chapter_number, chapter_url, chapter_root_path, chapter_title=None, volume_number=None, group_name=None):
super(MangaChapterBatoto, self).__init__(manga_name, chapter_number, chapter_url, chapter_root_path, chapter_title, volume_number, group_name)
def retrieveAllPages(self):
## Look at the options of select element at //*[@id="page_select"]
## Take the value for each (page url) and save them
webpage = readHTML(self.chapter_url)
if webpage.xpath("//a[@href='?supress_webtoon=t']"): ## Long strip format for webtoons
if __DEBUG__:
print "Webtoon: reading in long strip format"
s = webpage.xpath("//div[@id='read_settings']/following-sibling::div/img")
for l in s:
self.addPage(l.get('src'))
else:
s = webpage.xpath("//*[@id='page_select']")[0]
for option in s.xpath('.//option[@value]'):
self.addPage(option.get('value'))
def downloadPage(self, page_url, page_file_path):
## Get @src attribute of element at //*[@id="comic_page"]
if urlparse.urlparse(page_url).path.split('.')[-1] in ['jpg', 'png']:
img_url = page_url
else:
webpage = readHTML(page_url)
img_url = webpage.xpath('//*[@id="comic_page"]')[0].get('src')
downloadImage(img_url, page_file_path + "." + urlparse.urlparse(img_url).path.split('.')[-1])
## Subclass representing a manga chapter from Starkana
class MangaChapterStarkana(MangaChapter):
def __init__(self, manga_name, chapter_number, chapter_url, chapter_root_path):
super(MangaChapterStarkana, self).__init__(manga_name, chapter_number, chapter_url, chapter_root_path)
def retrieveAllPages(self):
## Look at the options of select element at //*[@id="page_switch"]
## Take the value for each (page url) and save them
webpage = readHTML(self.chapter_url)
s = webpage.xpath("//*[@id='page_switch']")[0]
for option in s.xpath('.//option[@value]'):
self.addPage(option.get('value'))
def downloadPage(self, page_url, page_file_path):
## Get @src attribute of element at //*[@id="pic"/div/img]
webpage = readHTML(page_url)
img_url = webpage.xpath('//*[@id="pic"]/div/img')[0].get('src')
downloadImage(img_url, page_file_path + "." + urlparse.urlparse(img_url).path.split('.')[-1])
## Generic class representing a manga
class Manga(object):
def __init__(self, manga_url, manga_name=None):
self.name = manga_name
self.url = manga_url
self.chapter_list = []
def createFolder(self, path):
path = cleanPath(path)
if not os.path.exists(path):
os.makedirs(path)
with open(path + '/mangadl.link', 'w') as f:
f.write(self.url)
def addMangaChapter(self, manga_chapter):
self.chapter_list.insert(0, manga_chapter)
if __DEBUG__:
print "Added chapter " + manga_chapter.chapter_number
def retrieveAllChapters(self):
raise NotImplementedError # To be overridden in subclasses
## Subclass representing a manga hosted in Batoto
class MangaBatoto(Manga):
def __init__(self, manga_url, manga_name=None):
super(MangaBatoto, self).__init__(manga_url, manga_name)
## Regular expressions for parsing the chapter headings and retrieve volume number, chapter number, title etc
self.CHAPTER_TITLE_PATTERN_CHECK_VOLUME = '^Vol\..+'
self.CHAPTER_TITLE_PATTERN_WITH_VOLUME = '^Vol\.\s*([0-9]+|Extra)\s*Ch.\s*([0-9\.vA-Za-z-\(\)]+):?\s+(.+)'
self.CHAPTER_TITLE_PATTERN_NO_VOLUME = '^Ch.\s*([0-9\.vA-Za-z-\(\)]+):?\s+(.+)'
def retrieveAllChapters(self):
webpage = readHTML(self.url)
## print tostring(webpage) # For testing only
if self.name is None:
self.name = webpage.xpath('//h1[@class="ipsType_pagetitle"]')[0].text.strip()
print "Set name to: " + self.name
assert(self.name is not None)
ch_path = "Batoto - " + self.name
self.createFolder(ch_path)
for ch_row in webpage.xpath('//table[@class="ipb_table chapters_list"]/tbody/tr')[1:]:
if ch_row.get('class') == 'row lang_English chapter_row':
ch_a = ch_row.xpath('.//td')[0].xpath('.//a')[0]
ch_url = ch_a.get('href')
ch_name = unicode(ch_a.text_content().strip(' \t\n\r')).translate(dict.fromkeys(map(ord, '\\/'), None))
if __DEBUG__:
print ch_name
vol_no = None
ch_no = None
ch_title = None
if re.match(self.CHAPTER_TITLE_PATTERN_CHECK_VOLUME, ch_name):
m = re.match(self.CHAPTER_TITLE_PATTERN_WITH_VOLUME, ch_name)
vol_no = m.group(1)
ch_no = m.group(2)
ch_title = m.group(3)
else:
m = re.match(self.CHAPTER_TITLE_PATTERN_NO_VOLUME, ch_name)
ch_no = m.group(1)
ch_title = m.group(2)
assert(ch_no is not None) # Chapter number is mandatory
gr_a = ch_row.xpath('.//td')[2].xpath('.//a')[0]
gr_name = unicode(gr_a.text.strip(' \t\n\r')).translate(dict.fromkeys(map(ord, '\\/'), None))
self.addMangaChapter(MangaChapterBatoto(self.name, ch_no, ch_url, ch_path, ch_title, vol_no, gr_name))
## Subclass representing a manga hosted in Starkana
class MangaStarkana(Manga):
def __init__(self, manga_url, manga_name=None):
super(MangaStarkana, self).__init__(manga_url, manga_name)
def retrieveAllChapters(self):
webpage = readHTML(self.url)
## print tostring(webpage) # For testing only
if self.name is None:
if webpage.xpath('//meta[@property="og:title"]'):
self.name = webpage.xpath('//meta[@property="og:title"]/@content')[0].strip()
else:
self.name = self.url.split('/')[-1].replace('_', ' ')
print "Set name to: " + self.name
assert(self.name is not None)
ch_path = "Starkana - " + self.name
self.createFolder(ch_path)
for ch_row in webpage.xpath('//a[@class="download-link"]'):
ch_no = None
ch_url = ch_row.get('href')
ch_no = ch_url.split('/')[-1]
assert(ch_no is not None)
self.addMangaChapter(MangaChapterStarkana(self.name, ch_no, ch_url, ch_path))
# Data structures that help instantiating the right subclasses based on URL
URL_TYPES = {'http://www.batoto.net' : {'url' : '(http://)?(www\.)?(batoto\.net).+-r[0-9]+', 'manga' : MangaBatoto, 'mangachapter' : MangaChapterBatoto},
'http://www.bato.to' : {'url' : '(http://)?(www\.)?(bato\.to).+-r[0-9]+', 'manga' : MangaBatoto, 'mangachapter' : MangaChapterBatoto},
'http://www.starkana.com' : {'url' : '(http://)?(www\.)?starkana\.com/manga/[0A-Z]/.+', 'manga' : MangaStarkana, 'mangachapter' : MangaChapterStarkana}
}
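# Each entry maps a hosting site to a URL-matching regex and the Manga / MangaChapter subclasses
# to instantiate for it; checkURLType() (defined earlier) returns the matching key for a given URL,
# presumably by testing each entry's 'url' pattern.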
# Parse command line arguments
parser = argparse.ArgumentParser(description = 'Download manga chapters from collection sites.')
parser.add_argument('--Debug', '-D', help = 'Run in debug mode', action = 'store_true')
parser.add_argument('--Test', '-T', help = 'Run in test mode (downloads suppressed)', action = 'store_true')
parser.add_argument('--verbose', '-v', help = 'Enable verbose mode', action = 'store_true')
group = parser.add_mutually_exclusive_group(required = False)
group.add_argument('--reload', '-r', help = 'Update all manga folders in current directory', action = 'store_true')
group.add_argument('--update', '-u', help = 'Update the manga at the url(s) provided', action = 'append')
args = vars(parser.parse_args())
url_list = []
__DEBUG__ = args['Debug']
__DOWNLOAD__ = not args['Test']
verbose = args['verbose']
if args['reload']:
for subdir in filter(lambda f: os.path.isdir(f), glob.glob('*')):
if glob.glob(subdir + '/mangadl.link'):
with open(subdir + '/mangadl.link', 'r') as f:
url_list.append(f.read())
elif glob.glob(subdir + '/* Chapter *'):
url_input = raw_input("Enter URL for folder " + subdir + " (Press ENTER to skip) : ")
url_list.append(url_input)
elif args['update']:
url_list = args['update']
else:
url_input = raw_input("Enter URL: ")
url_list.append(url_input)
url_list = filter(None, url_list)
assert(url_list)
for url in url_list:
url_type = checkURLType(url)
manga = URL_TYPES[url_type]['manga'](url) # Instantiate manga object
manga.retrieveAllChapters() # Add all chapters to it
chapter_count = len(manga.chapter_list)
curr_download_count = 0
for chapter in manga.chapter_list:
if __DEBUG__:
chapter.show()
sys.stdout.write("\rDownloaded " + str(curr_download_count) + "/" + str(chapter_count) + " chapters.")
sys.stdout.flush()
if __DOWNLOAD__:
if __DEBUG__:
print "\nDownloading chapter..."
chapter.downloadChapter()
curr_download_count = curr_download_count + 1
sys.stdout.write("\rDownloaded " + str(curr_download_count) + "/" + str(chapter_count) + " chapters.")
sys.stdout.flush()
print "\n"
print "Finished."
exit(0)
|
|
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils.relationships import select_correlated_expression
@pytest.fixture
def group_user_tbl(Base):
return sa.Table(
'group_user',
Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
@pytest.fixture
def group_tbl(Base):
class Group(Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
return Group
@pytest.fixture
def friendship_tbl(Base):
return sa.Table(
'friendships',
Base.metadata,
sa.Column(
'friend_a_id',
sa.Integer,
sa.ForeignKey('user.id'),
primary_key=True
),
sa.Column(
'friend_b_id',
sa.Integer,
sa.ForeignKey('user.id'),
primary_key=True
)
)
@pytest.fixture
def User(Base, group_user_tbl, friendship_tbl):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
groups = sa.orm.relationship(
'Group',
secondary=group_user_tbl,
backref='users'
)
# this relationship is used for persistence
friends = sa.orm.relationship(
'User',
secondary=friendship_tbl,
primaryjoin=id == friendship_tbl.c.friend_a_id,
secondaryjoin=id == friendship_tbl.c.friend_b_id,
)
friendship_union = (
sa.select([
friendship_tbl.c.friend_a_id,
friendship_tbl.c.friend_b_id
]).union(
sa.select([
friendship_tbl.c.friend_b_id,
friendship_tbl.c.friend_a_id]
)
).alias()
)
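    # Union both column orders so the friendship is symmetric: all_friends finds a user's
    # friends regardless of which side of the friendship row they appear on.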
User.all_friends = sa.orm.relationship(
'User',
secondary=friendship_union,
primaryjoin=User.id == friendship_union.c.friend_a_id,
secondaryjoin=User.id == friendship_union.c.friend_b_id,
viewonly=True,
order_by=User.id
)
return User
@pytest.fixture
def Category(Base, group_user_tbl, friendship_tbl):
class Category(Base):
__tablename__ = 'category'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
created_at = sa.Column(sa.DateTime)
parent_id = sa.Column(sa.Integer, sa.ForeignKey('category.id'))
parent = sa.orm.relationship(
'Category',
backref='subcategories',
remote_side=[id],
order_by=id
)
return Category
@pytest.fixture
def Article(Base, Category, User):
class Article(Base):
__tablename__ = 'article'
id = sa.Column('_id', sa.Integer, primary_key=True)
name = sa.Column(sa.String)
name_synonym = sa.orm.synonym('name')
@hybrid_property
def name_upper(self):
return self.name.upper() if self.name else None
@name_upper.expression
def name_upper(cls):
return sa.func.upper(cls.name)
content = sa.Column(sa.String)
category_id = sa.Column(sa.Integer, sa.ForeignKey(Category.id))
category = sa.orm.relationship(Category, backref='articles')
author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
author = sa.orm.relationship(
User,
primaryjoin=author_id == User.id,
backref='authored_articles'
)
owner_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
owner = sa.orm.relationship(
User,
primaryjoin=owner_id == User.id,
backref='owned_articles'
)
return Article
@pytest.fixture
def Comment(Base, Article, User):
class Comment(Base):
__tablename__ = 'comment'
id = sa.Column(sa.Integer, primary_key=True)
content = sa.Column(sa.String)
article_id = sa.Column(sa.Integer, sa.ForeignKey(Article.id))
article = sa.orm.relationship(Article, backref='comments')
author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
author = sa.orm.relationship(User, backref='comments')
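    # Expose each article's number of comments as a correlated scalar subquery column.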
Article.comment_count = sa.orm.column_property(
sa.select([sa.func.count(Comment.id)])
.where(Comment.article_id == Article.id)
.correlate_except(Article)
)
return Comment
@pytest.fixture
def model_mapping(Article, Category, Comment, group_tbl, User):
return {
'articles': Article,
'categories': Category,
'comments': Comment,
'groups': group_tbl,
'users': User
}
@pytest.fixture
def init_models(Article, Category, Comment, group_tbl, User):
pass
@pytest.fixture
def dataset(
session,
User,
group_tbl,
Article,
Category,
Comment
):
group = group_tbl(name='Group 1')
group2 = group_tbl(name='Group 2')
user = User(id=1, name='User 1', groups=[group, group2])
user2 = User(id=2, name='User 2')
user3 = User(id=3, name='User 3', groups=[group])
user4 = User(id=4, name='User 4', groups=[group2])
user5 = User(id=5, name='User 5')
user.friends = [user2]
user2.friends = [user3, user4]
user3.friends = [user5]
article = Article(
name='Some article',
author=user,
owner=user2,
category=Category(
id=1,
name='Some category',
subcategories=[
Category(
id=2,
name='Subcategory 1',
subcategories=[
Category(
id=3,
name='Subsubcategory 1',
subcategories=[
Category(
id=5,
name='Subsubsubcategory 1',
),
Category(
id=6,
name='Subsubsubcategory 2',
)
]
)
]
),
Category(id=4, name='Subcategory 2'),
]
),
comments=[
Comment(
content='Some comment',
author=user
)
]
)
session.add(user3)
session.add(user4)
session.add(article)
session.commit()
@pytest.mark.usefixtures('dataset', 'postgresql_dsn')
class TestSelectCorrelatedExpression(object):
@pytest.mark.parametrize(
('model_key', 'related_model_key', 'path', 'result'),
(
(
'categories',
'categories',
'subcategories',
[
(1, 2),
(2, 1),
(3, 2),
(4, 0),
(5, 0),
(6, 0)
]
),
(
'articles',
'comments',
'comments',
[
(1, 1),
]
),
(
'users',
'groups',
'groups',
[
(1, 2),
(2, 0),
(3, 1),
(4, 1),
(5, 0)
]
),
(
'users',
'users',
'all_friends',
[
(1, 1),
(2, 3),
(3, 2),
(4, 1),
(5, 1)
]
),
(
'users',
'users',
'all_friends.all_friends',
[
(1, 3),
(2, 2),
(3, 3),
(4, 3),
(5, 2)
]
),
(
'users',
'users',
'groups.users',
[
(1, 3),
(2, 0),
(3, 2),
(4, 2),
(5, 0)
]
),
(
'groups',
'articles',
'users.authored_articles',
[
(1, 1),
(2, 1),
]
),
(
'categories',
'categories',
'subcategories.subcategories',
[
(1, 1),
(2, 2),
(3, 0),
(4, 0),
(5, 0),
(6, 0)
]
),
(
'categories',
'categories',
'subcategories.subcategories.subcategories',
[
(1, 2),
(2, 0),
(3, 0),
(4, 0),
(5, 0),
(6, 0)
]
),
)
)
def test_returns_correct_results(
self,
session,
model_mapping,
model_key,
related_model_key,
path,
result
):
model = model_mapping[model_key]
alias = sa.orm.aliased(model_mapping[related_model_key])
aggregate = select_correlated_expression(
model,
sa.func.count(sa.distinct(alias.id)),
path,
alias
)
query = session.query(
model.id,
aggregate.label('count')
).order_by(model.id)
assert query.all() == result
def test_order_by_intermediate_table_column(
self,
session,
model_mapping,
group_user_tbl
):
model = model_mapping['users']
alias = sa.orm.aliased(model_mapping['groups'])
aggregate = select_correlated_expression(
model,
sa.func.json_build_object('id', alias.id),
'groups',
alias,
order_by=[group_user_tbl.c.user_id]
).alias('test')
# Just check that the query execution doesn't fail because of wrongly
# constructed aliases
assert session.execute(aggregate)
def test_with_non_aggregate_function(
self,
session,
User,
Article
):
aggregate = select_correlated_expression(
Article,
sa.func.json_build_object('name', User.name),
'comments.author',
User
)
query = session.query(
Article.id,
aggregate.label('author_json')
).order_by(Article.id)
result = query.all()
assert result == [
(1, {'name': 'User 1'})
]
|
|
import sys
from genStubs import *
stub = Stubs( "systemMessages", sys.argv[1], sys.argv[2] )
stub.include( "nanopb/IMessage.h" )
stub.include( "systemMessages/AGLMsg.pb.h" )
stub.include( "systemMessages/AGLOffsetMsg.pb.h" )
stub.include( "systemMessages/AGLRawMsg.pb.h" )
stub.include( "systemMessages/AbortLaunchMsg.pb.h" )
stub.include( "systemMessages/AccelGyroDataMsg.pb.h" )
stub.include( "systemMessages/AccelGyroDataRaw.pb.h" )
stub.include( "systemMessages/ActiveControlSourceNotification.pb.h" )
stub.include( "systemMessages/ActiveManeuverSourceNotification.pb.h" )
stub.include( "systemMessages/ActuatorConstants.pb.h" )
stub.include( "systemMessages/ActuatorPictureMsg.pb.h" )
stub.include( "systemMessages/ActuatorPortCalibration.pb.h" )
stub.include( "systemMessages/ActuatorPortConfigMsg.pb.h" )
stub.include( "systemMessages/ActuatorPowerBusMsg.pb.h" )
stub.include( "systemMessages/ActuatorTakePicture.pb.h" )
stub.include( "systemMessages/AeroTerminateMsg.pb.h" )
stub.include( "systemMessages/AirmailDebugLogSettingsMsg.pb.h" )
stub.include( "systemMessages/AirmailPoolStatsMsg.pb.h" )
stub.include( "systemMessages/AirspeedCalibrationDataMsg.pb.h" )
stub.include( "systemMessages/AltMSLCorrection.pb.h" )
stub.include( "systemMessages/AnnounceMsg.pb.h" )
stub.include( "systemMessages/AttCtrlConfig.pb.h" )
stub.include( "systemMessages/AuxControlMix.pb.h" )
stub.include( "systemMessages/AwxHeaderMsg.pb.h" )
stub.include( "systemMessages/BoardStatus.pb.h" )
stub.include( "systemMessages/ClientRequest.pb.h" )
stub.include( "systemMessages/ConnectionStatus.pb.h" )
stub.include( "systemMessages/ContingencyEventMap.pb.h" )
stub.include( "systemMessages/ContingencyEventStatus.pb.h" )
stub.include( "systemMessages/ControlLog.pb.h" )
stub.include( "systemMessages/ControlLogRateConfig.pb.h" )
stub.include( "systemMessages/ControlRequest.pb.h" )
stub.include( "systemMessages/DateOfLastConfigurationMsg.pb.h" )
stub.include( "systemMessages/DeviceManagerMsgs.pb.h" )
stub.include( "systemMessages/EffectorCmdsMsg.pb.h" )
stub.include( "systemMessages/EffectorStatusMsg.pb.h" )
stub.include( "systemMessages/EffectorSurfaceMap.pb.h" )
stub.include( "systemMessages/EthernetStatusMsg.pb.h" )
stub.include( "systemMessages/Example.pb.h" )
stub.include( "systemMessages/FileTransferMsg.pb.h" )
stub.include( "systemMessages/FlightStatus.pb.h" )
stub.include( "systemMessages/GCSConnectivityStatus.pb.h" )
stub.include( "systemMessages/GCSJobInfoMsg.pb.h" )
stub.include( "systemMessages/GPSData.pb.h" )
stub.include( "systemMessages/GPSRestartMsg.pb.h" )
stub.include( "systemMessages/GPSStatus.pb.h" )
stub.include( "systemMessages/Geofence.pb.h" )
stub.include( "systemMessages/GuidanceConfig.pb.h" )
stub.include( "systemMessages/HealthEventMsg.pb.h" )
stub.include( "systemMessages/HobbsMeter.pb.h" )
stub.include( "systemMessages/IMUOrientationConfig.pb.h" )
stub.include( "systemMessages/INSAccelData.pb.h" )
stub.include( "systemMessages/INSAncillaryData.pb.h" )
stub.include( "systemMessages/INSAttitudeData.pb.h" )
stub.include( "systemMessages/INSConfigMsg.pb.h" )
stub.include( "systemMessages/INSCorrectionData.pb.h" )
stub.include( "systemMessages/INSCorrectionRequest.pb.h" )
stub.include( "systemMessages/INSEnums.pb.h" )
stub.include( "systemMessages/INSErrorData.pb.h" )
stub.include( "systemMessages/INSLog.pb.h" )
stub.include( "systemMessages/INSMessageComponents.pb.h" )
stub.include( "systemMessages/INSPosVelData.pb.h" )
stub.include( "systemMessages/INSStatusData.pb.h" )
stub.include( "systemMessages/KillMode.pb.h" )
stub.include( "systemMessages/LaneSplitter.pb.h" )
stub.include( "systemMessages/LaneSplitterStatsMsg.pb.h" )
stub.include( "systemMessages/LogInformationEntry.pb.h" )
stub.include( "systemMessages/LogManagement.pb.h" )
stub.include( "systemMessages/LogRequestMsg.pb.h" )
stub.include( "systemMessages/MPUCalConfig.pb.h" )
stub.include( "systemMessages/MRAirframeConfig.pb.h" )
stub.include( "systemMessages/MagCalibrationParameters.pb.h" )
stub.include( "systemMessages/MagData.pb.h" )
stub.include( "systemMessages/MagDataRaw.pb.h" )
stub.include( "systemMessages/MagOrientationConfigMsg.pb.h" )
stub.include( "systemMessages/Maneuver.pb.h" )
stub.include( "systemMessages/ManeuverExecutionStatus.pb.h" )
stub.include( "systemMessages/ManeuverPauseResumeMsg.pb.h" )
stub.include( "systemMessages/MapRcInputToFlightChannelMsg.pb.h" )
stub.include( "systemMessages/Menagerie.pb.h" )
stub.include( "systemMessages/MfgParamsMsg.pb.h" )
stub.include( "systemMessages/Mission.pb.h" )
stub.include( "systemMessages/MissionExec.pb.h" )
stub.include( "systemMessages/MissionList.pb.h" )
stub.include( "systemMessages/MissionStatus.pb.h" )
stub.include( "systemMessages/ModemConfig.pb.h" )
stub.include( "systemMessages/ModemGetRadioType.pb.h" )
stub.include( "systemMessages/ModemLinkStatus.pb.h" )
stub.include( "systemMessages/ModemPower.pb.h" )
stub.include( "systemMessages/NakMsg.pb.h" )
stub.include( "systemMessages/OperatorModuleConfig.pb.h" )
stub.include( "systemMessages/PWMRateMsg.pb.h" )
stub.include( "systemMessages/PayloadPower.pb.h" )
stub.include( "systemMessages/PosVelCtrlConfig.pb.h" )
stub.include( "systemMessages/PowerManagerConfig.pb.h" )
stub.include( "systemMessages/PowerStatus.pb.h" )
stub.include( "systemMessages/PressureData.pb.h" )
stub.include( "systemMessages/PrimaryControlMix.pb.h" )
stub.include( "systemMessages/PrimitiveDataTypes.pb.h" )
stub.include( "systemMessages/RcChannels.pb.h" )
stub.include( "systemMessages/RcInputCalibrationMsg.pb.h" )
stub.include( "systemMessages/RcInputMsg.pb.h" )
stub.include( "systemMessages/RebootRequestMsg.pb.h" )
stub.include( "systemMessages/RgbLed.pb.h" )
stub.include( "systemMessages/STM32OTPParams.pb.h" )
stub.include( "systemMessages/SaveConfigConstants.pb.h" )
stub.include( "systemMessages/ServerResponse.pb.h" )
stub.include( "systemMessages/Shape2D.pb.h" )
stub.include( "systemMessages/SimConfigurationRequest.pb.h" )
stub.include( "systemMessages/SimControlRequest.pb.h" )
stub.include( "systemMessages/StateMachineEnums.pb.h" )
stub.include( "systemMessages/SystemEnums.pb.h" )
stub.include( "systemMessages/SystemMode.pb.h" )
stub.include( "systemMessages/SystemPowerStatus.pb.h" )
stub.include( "systemMessages/TelemetryWatchdogConfig.pb.h" )
stub.include( "systemMessages/TemperatureData.pb.h" )
stub.include( "systemMessages/TestMessage.pb.h" )
stub.include( "systemMessages/ThreadStatsMsg.pb.h" )
stub.include( "systemMessages/TimeStamp.pb.h" )
stub.include( "systemMessages/VehicleDescriptionMessage.pb.h" )
stub.include( "systemMessages/VersionInfoEntry.pb.h" )
stub.newline()
stub.addLine( "typedef int16_t msgSize_t;" )
stub.newline()
stub.stubSysMsg( "CAGLMsg" )
stub.stubSysMsg( "CAGLOffsetMsg" )
stub.stubSysMsg( "CAGLRawMsg" )
stub.stubSysMsg( "CAccelGyroDataMsg" )
stub.stubSysMsg( "CAccelGyroDataRaw" )
stub.stubSysMsg( "CActiveControlSourceNotification" )
stub.stubSysMsg( "CActiveManeuverSourceNotification" )
stub.stubSysMsg( "CActuatorPictureMsg" )
stub.stubSysMsg( "CActuatorPortCalibration" )
stub.stubSysMsg( "CActuatorPortConfigMsg" )
stub.stubSysMsg( "CActuatorPowerBusMsg" )
stub.stubSysMsg( "CActuatorTakePictureMsg" )
stub.stubSysMsg( "CAirmailDebugLogSettingsMsg" )
stub.stubSysMsg( "CAirmailPoolStatsMsg" )
stub.stubSysMsg( "CAirspeedCalibrationDataMsg" )
stub.stubSysMsg( "CAltMSLCorrection" )
stub.stubSysMsg( "CAnnounceMsg" )
stub.stubSysMsg( "CAttCtrlConfig" )
stub.stubSysMsg( "CAuxControlMix" )
stub.stubSysMsg( "CAwxHeaderMsg" )
stub.stubSysMsg( "CBoardStatus" )
stub.stubSysMsg( "CClientRequest" )
stub.stubSysMsg( "CRegisterAsPeriodicPublisherMsg" )
stub.stubSysMsg( "CUnregisterAsPublisherMsg" )
stub.stubSysMsg( "CSubscribePeriodicMsg" )
stub.stubSysMsg( "CUnsubscribeTopicMsg" )
stub.stubSysMsg( "CRegisterAsCallerMsg" )
stub.stubSysMsg( "CUnregisterAsCallerMsg" )
stub.stubSysMsg( "CRegisterAsProviderMsg" )
stub.stubSysMsg( "CUnregisterAsProviderMsg" )
stub.stubSysMsg( "CCallServiceMsg" )
stub.stubSysMsg( "CPublishTopicMsg" )
stub.stubSysMsg( "CClientServiceResponseMsg" )
stub.stubSysMsg( "CConnectionStatus" )
stub.stubSysMsg( "CContingencyEventMap" )
stub.stubSysMsg( "CContingencyEventStatus" )
stub.stubSysMsg( "CControlLog" )
stub.stubSysMsg( "CControlLogRateConfig" )
stub.stubSysMsg( "CControlRequest" )
stub.stubSysMsg( "CDateOfLastConfigurationMsg" )
stub.stubSysMsg( "CSignatureIdMsg" )
stub.stubSysMsg( "CServiceInfoMsg" )
stub.stubSysMsg( "CProviderIdMsg" )
stub.stubSysMsg( "CSignatureHashMsg" )
stub.stubSysMsg( "CSignatureHashAndProviderMsg" )
stub.stubSysMsg( "CQueryResultMsg" )
stub.stubSysMsg( "CUniqueIdMsg" )
stub.stubSysMsg( "CNodeInfoMsg" )
stub.stubSysMsg( "CNodeIdAckMsg" )
stub.stubSysMsg( "CNodeIdMsg" )
stub.stubSysMsg( "CNodeIdListMsg" )
stub.stubSysMsg( "CNodeInfoFilterMsg" )
stub.stubSysMsg( "CEffectorCmdsMsg" )
stub.stubSysMsg( "CEffectorStatusMsg" )
stub.stubSysMsg( "CEffectorSurfaceMap" )
stub.stubSysMsg( "CEthernetPortStatusMsg" )
stub.stubSysMsg( "CEthernetStatusMsg" )
stub.stubSysMsg( "CListFilesRequest" )
stub.stubSysMsg( "CFileInfo" )
stub.stubSysMsg( "CListFilesResponse" )
stub.stubSysMsg( "CFileTransferMsg" )
stub.stubSysMsg( "CFlightStatus" )
stub.stubSysMsg( "CGCSConnectivityStatus" )
stub.stubSysMsg( "CGCSJobInfoMsg" )
stub.stubSysMsg( "CGPSData" )
stub.stubSysMsg( "CGPSRestartMsg" )
stub.stubSysMsg( "CGPSStatus" )
stub.stubSysMsg( "CGeofence" )
stub.stubSysMsg( "CGuidanceConfig" )
stub.stubSysMsg( "CHealthEventMsg" )
stub.stubSysMsg( "CHobbsMeterMsg" )
stub.stubSysMsg( "CIMUOrientationConfig" )
stub.stubSysMsg( "CINSAncillaryData" )
stub.stubSysMsg( "CINSAttitudeData" )
stub.stubSysMsg( "CINSConfigMsg" )
stub.stubSysMsg( "CINSCorrectionData" )
stub.stubSysMsg( "CINSCorrectionRequest" )
stub.stubSysMsg( "CINSErrorData" )
stub.stubSysMsg( "CINSLog" )
stub.stubSysMsg( "CVectorXYZ" )
stub.stubSysMsg( "CVectorNED" )
stub.stubSysMsg( "CDCM" )
stub.stubSysMsg( "CINSPosVelData" )
stub.stubSysMsg( "CINSStatusData" )
stub.stubSysMsg( "CKillCh" )
stub.stubSysMsg( "CKillModeMsg" )
stub.stubSysMsg( "CLaneSplitterStatsMsg" )
stub.stubSysMsg( "CLogEntryProvider" )
stub.stubSysMsg( "CLogMgmtCmd" )
stub.stubSysMsg( "CLogMgmtResponse" )
stub.stubSysMsg( "CLogRequestMsg" )
stub.stubSysMsg( "CMPUCalConfig" )
stub.stubSysMsg( "CMRAirframeConfig" )
stub.stubSysMsg( "CMagCalibrationParameters" )
stub.stubSysMsg( "CMagData" )
stub.stubSysMsg( "CMagDataRaw" )
stub.stubSysMsg( "CMagOrientationConfigMsg" )
stub.stubSysMsg( "CManeuver" )
stub.stubSysMsg( "CManeuverExecutionStatus" )
stub.stubSysMsg( "CManeuverPauseResumeMsg" )
stub.stubSysMsg( "CMapRcInputToFlightChannelMsg" )
stub.stubSysMsg( "CpointType" )
stub.stubSysMsg( "CMR_FLT_trackToPt" )
stub.stubSysMsg( "CMR_FLT_holdAtPt" )
stub.stubSysMsg( "CMR_FLT_manAttitude" )
stub.stubSysMsg( "CMR_FLT_manVelocity" )
stub.stubSysMsg( "CMR_TKO_liftoffMSL" )
stub.stubSysMsg( "CMR_LND_descendMSL" )
stub.stubSysMsg( "CMR_FLT_stopAndHold" )
stub.stubSysMsg( "CMR_LND_stopAndDescend" )
stub.stubSysMsg( "CMR_LND_attitudeOnly" )
stub.stubSysMsg( "CMR_FLT_minAltGoto" )
stub.stubSysMsg( "CMR_FLT_photoSurvey" )
stub.stubSysMsg( "CMR_FLT_surveyPoint" )
stub.stubSysMsg( "CLND_terminate" )
stub.stubSysMsg( "CFW_FLT_manAttitude" )
stub.stubSysMsg( "CFW_FLT_manFull" )
stub.stubSysMsg( "CFW_FLT_circle" )
stub.stubSysMsg( "CFW_FLT_slantTrackTo" )
stub.stubSysMsg( "CFW_FLT_directTo" )
stub.stubSysMsg( "CFW_TKO_launch" )
stub.stubSysMsg( "CFW_LND_touchdown" )
stub.stubSysMsg( "CFW_LND_glidingCircle" )
stub.stubSysMsg( "CFW_LND_attitudeOnly" )
stub.stubSysMsg( "CFW_FLT_photoSurvey" )
stub.stubSysMsg( "CMfgParamsMsg" )
stub.stubSysMsg( "CMission" )
stub.stubSysMsg( "CMissionExec" )
stub.stubSysMsg( "CMissionList" )
stub.stubSysMsg( "CMissionStatus" )
stub.stubSysMsg( "CRadioConfigMsg" )
stub.stubSysMsg( "CRadioConfigOOBMsg" )
stub.stubSysMsg( "CRadioTypeMsg" )
stub.stubSysMsg( "CradioLinkStatusMsg" )
stub.stubSysMsg( "CradioPowerMsg" )
stub.stubSysMsg( "CNakMsg" )
stub.stubSysMsg( "COperatorModuleConfig" )
stub.stubSysMsg( "CPWMRateMsg" )
stub.stubSysMsg( "CPayloadPowerMsg" )
stub.stubSysMsg( "CPosVelCtrlConfig" )
stub.stubSysMsg( "CPowerManagerConfig" )
stub.stubSysMsg( "CCircuitState" )
stub.stubSysMsg( "CPowerStatusMsg" )
stub.stubSysMsg( "CPressureData" )
stub.stubSysMsg( "CPrimaryControlMix" )
stub.stubSysMsg( "CBoolMsg" )
stub.stubSysMsg( "CSint32Msg" )
stub.stubSysMsg( "CUint32Msg" )
stub.stubSysMsg( "CFloatMsg" )
stub.stubSysMsg( "CRcInputCalibrationMsg" )
stub.stubSysMsg( "CRcInputMsg" )
stub.stubSysMsg( "CRebootRequestMsg" )
stub.stubSysMsg( "CRgbLedMsg" )
stub.stubSysMsg( "CSaveConfigMsg" )
stub.stubSysMsg( "CServerResponse" )
stub.stubSysMsg( "CTopicDataMsg" )
stub.stubSysMsg( "CServiceCallResultMsg" )
stub.stubSysMsg( "CServiceCallRequestMsg" )
stub.stubSysMsg( "CServiceCallRegistrationAck" )
stub.stubSysMsg( "CAcknowledgementMsg" )
stub.stubSysMsg( "CintPointType" )
stub.stubSysMsg( "CCircleType" )
stub.stubSysMsg( "CPolygonType" )
stub.stubSysMsg( "CShape2D" )
stub.stubSysMsg( "CSimConfigurationRequest" )
stub.stubSysMsg( "CSimControlRequestMsg" )
stub.stubSysMsg( "CSystemMode" )
stub.stubSysMsg( "CSystemPowerStatusMsg" )
stub.stubSysMsg( "CTelemetryWatchdogConfig" )
stub.stubSysMsg( "CTemperatureData" )
stub.stubSysMsg( "CTestMessage" )
stub.stubSysMsg( "CThreadStatsMsg" )
stub.stubSysMsg( "CTimeStamp" )
stub.stubSysMsg( "CVehicleDescriptionMessage" )
stub.stubSysMsg( "CVersionEntry" )
stub.stubSysMsg( "CVersionMsg" )
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import uuid
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:admin:instances:index')
class InstanceViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.nova: ('flavor_list', 'server_list',
'extension_supported',),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',)})
def test_index(self):
servers = self.servers.list()
flavors = self.flavors.list()
tenants = self.tenants.list()
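        # Record the API calls the index view is expected to make; mox replays and verifies
        # these expectations when the view is rendered below.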
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([tenants, False])
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
'server_list', 'extension_supported',),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',)})
def test_index_flavor_list_exception(self):
servers = self.servers.list()
tenants = self.tenants.list()
flavors = self.flavors.list()
full_flavors = OrderedDict([(f.id, f) for f in flavors])
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)). \
AndRaise(self.exceptions.nova)
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([tenants, False])
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndReturn(full_flavors[server.flavor["id"]])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
'server_list', 'extension_supported', ),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',)})
def test_index_flavor_get_exception(self):
servers = self.servers.list()
flavors = self.flavors.list()
tenants = self.tenants.list()
        # UUIDs generated from the loop index are unlikely to match any
        # existing flavor id and are guaranteed to be deterministic.
for i, server in enumerate(servers):
server.flavor['id'] = str(uuid.UUID(int=i))
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)). \
AndReturn(flavors)
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([tenants, False])
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
instances = res.context['table'].data
self.assertTemplateUsed(res, 'admin/instances/index.html')
        # Since the error messages produced for each instance are identical,
        # only one error message is shown for all instances
        # (messages are de-duplicated).
self.assertMessageCount(res, error=1)
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('server_list',)})
def test_index_server_list_exception(self):
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/instances/index.html')
self.assertEqual(len(res.context['instances_table'].data), 0)
@test.create_stubs({api.nova: ('server_get', 'flavor_get',
'extension_supported', ),
api.network: ('servers_update_addresses',),
api.keystone: ('tenant_get',)})
def test_ajax_loading_instances(self):
server = self.servers.first()
flavor = self.flavors.list()[0]
tenant = self.tenants.list()[0]
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.flavor_get(IsA(http.HttpRequest),
server.flavor['id']).AndReturn(flavor)
api.keystone.tenant_get(IsA(http.HttpRequest),
server.tenant_id,
admin=True).AndReturn(tenant)
self.mox.ReplayAll()
url = (INDEX_URL +
"?action=row_update&table=instances&obj_id=" + server.id)
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTemplateUsed(res, "horizon/common/_data_table_row.html")
self.assertContains(res, "test_tenant", 1, 200)
self.assertContains(res, "instance-host", 1, 200)
        # the name appears twice: once in the cell and once in the row's data-display attribute
self.assertContains(res, "server_1", 2, 200)
self.assertContains(res, "10.0.0.1", 1, 200)
self.assertContains(res, "RAM</th><td>512MB", 1, 200)
self.assertContains(res, "VCPUs</th><td>1", 1, 200)
self.assertContains(res, "Size</th><td>0 GB", 1, 200)
self.assertContains(res, "Active", 1, 200)
self.assertContains(res, "Running", 1, 200)
@test.create_stubs({api.nova: ('flavor_list', 'server_list',
'extension_supported', ),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',)})
def test_index_options_before_migrate(self):
servers = self.servers.list()
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([self.tenants.list(), False])
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)).\
AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertContains(res, "instances__migrate")
self.assertNotContains(res, "instances__confirm")
self.assertNotContains(res, "instances__revert")
@test.create_stubs({api.nova: ('flavor_list', 'server_list',
'extension_supported', ),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',)})
def test_index_options_after_migrate(self):
servers = self.servers.list()
server1 = servers[0]
server1.status = "VERIFY_RESIZE"
server2 = servers[2]
server2.status = "VERIFY_RESIZE"
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
search_opts = {'marker': None, 'paginate': True}
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.flavor_list(IsA(http.HttpRequest)).\
AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertContains(res, "instances__confirm")
self.assertContains(res, "instances__revert")
self.assertNotContains(res, "instances__migrate")
@test.create_stubs({api.nova: ('host_list',
'server_get',)})
def test_instance_live_migrate_get(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/instances/live_migrate.html')
@test.create_stubs({api.nova: ('server_get',)})
def test_instance_live_migrate_get_server_get_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('host_list',
'server_get',)})
def test_instance_live_migrate_list_hypervisor_get_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('host_list',
'server_get',)})
def test_instance_live_migrate_list_hypervisor_without_current(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertNotContains(
res, "<option value=\"instance-host\">devstack004</option>")
self.assertContains(
res, "<option value=\"devstack001\">devstack001</option>")
self.assertNotContains(
res, "<option value=\"devstack002\">devstack002</option>")
self.assertContains(
res, "<option value=\"devstack003\">devstack003</option>")
@test.create_stubs({api.nova: ('host_list',
'server_get',
'server_live_migrate',)})
def test_instance_live_migrate_post(self):
server = self.servers.first()
host = self.hosts.first().host_name
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
api.nova.server_live_migrate(IsA(http.HttpRequest), server.id, host,
block_migration=False,
disk_over_commit=False) \
.AndReturn([])
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.post(url, {'host': host, 'instance_id': server.id})
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('host_list',
'server_get',
'server_live_migrate',)})
def test_instance_live_migrate_post_api_exception(self):
server = self.servers.first()
host = self.hosts.first().host_name
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
api.nova.server_live_migrate(IsA(http.HttpRequest), server.id, host,
block_migration=False,
disk_over_commit=False) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.post(url, {'host': host, 'instance_id': server.id})
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_get',)})
def test_instance_details_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:detail',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
|
|
from __future__ import annotations
from contextlib import contextmanager
import os
from pathlib import Path
from shutil import rmtree
import tempfile
from typing import (
IO,
Any,
)
import uuid
import numpy as np
from pandas import set_option
from pandas.io.common import get_handle
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
        Name of the compression scheme used to decompress the file.
Returns
-------
file object
"""
with get_handle(path, "rb", compression=compression, is_text=False) as handle:
yield handle.handle
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime(2021, 1, 1))
...
'EST'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
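    # Note: time.tzset() is only available on Unix. Remember the original TZ so it can be restored on exit.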
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
@contextmanager
def ensure_clean(filename=None, return_filelike: bool = False, **kwargs: Any):
"""
    Get a temporary file path and remove the file on exit.
    This implementation does not use tempfile.mkstemp to avoid holding an open file handle.
    If the code using the returned path wants to delete the file itself, Windows
    requires that no program has a file handle to it.
Parameters
----------
filename : str (optional)
suffix of the created file.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords are passed to open().
"""
folder = Path(tempfile.gettempdir())
if filename is None:
filename = ""
filename = str(uuid.uuid4()) + filename
path = folder / filename
path.touch()
handle_or_str: str | IO = str(path)
if return_filelike:
kwargs.setdefault("mode", "w+b")
handle_or_str = open(path, **kwargs)
try:
yield handle_or_str
finally:
if not isinstance(handle_or_str, str):
handle_or_str.close()
if path.is_file():
path.unlink()
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and remove the directory on exit.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
    All changes will be undone on close, so environment variables set within this
    context manager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
try:
yield
finally:
csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
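    """
    Temporarily set the ``compute.use_numexpr`` option (and optionally the minimum
    element threshold), restoring the previous values on exit.
    """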
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
set_option("compute.use_numexpr", use)
expr._MIN_ELEMENTS = min_elements
try:
yield
finally:
expr._MIN_ELEMENTS = oldmin
set_option("compute.use_numexpr", olduse)
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
    to the original state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed) -> None:
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
|
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import datetime
import uuid
from aiohttp import web
from foglamp.services.core import server
from foglamp.services.core.scheduler.entities import Schedule, StartUpSchedule, TimedSchedule, IntervalSchedule, \
ManualSchedule, Task
from foglamp.services.core.scheduler.exceptions import *
from foglamp.services.core import connect
from foglamp.common.storage_client.payload_builder import PayloadBuilder
__author__ = "Amarendra K. Sinha"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
__DEFAULT_LIMIT = 20
_help = """
-------------------------------------------------------------------------------
| GET | /foglamp/schedule/process |
| GET | /foglamp/schedule/process/{scheduled_process_name} |
| GET POST | /foglamp/schedule |
| GET PUT DELETE | /foglamp/schedule/{schedule_id} |
| PUT | /foglamp/schedule/{schedule_id}/enable |
| PUT | /foglamp/schedule/{schedule_id}/disable |
| PUT | /foglamp/schedule/enable |
| PUT | /foglamp/schedule/disable |
| POST | /foglamp/schedule/start/{schedule_id} |
| GET | /foglamp/schedule/type |
| GET | /foglamp/task |
| GET | /foglamp/task/latest |
| GET | /foglamp/task/{task_id} |
| GET | /foglamp/task/state |
| PUT | /foglamp/task/{task_id}/cancel |
-------------------------------------------------------------------------------
"""
#################################
# Scheduled_processes
#################################
async def get_scheduled_processes(request):
"""
Returns:
a list of all the defined scheduled_processes from scheduled_processes table
:Example:
curl -X GET http://localhost:8081/foglamp/schedule/process
"""
processes_list = await server.Server.scheduler.get_scheduled_processes()
processes = []
for proc in processes_list:
processes.append(proc.name)
return web.json_response({'processes': processes})
async def get_scheduled_process(request):
"""
Returns:
            the name(s) of the requested scheduled process(es) from the scheduled_processes table
:Example:
curl -X GET http://localhost:8081/foglamp/schedule/process/purge
curl -X GET http://localhost:8081/foglamp/schedule/process/purge%2Cbackup%2Crestore
curl -X GET http://localhost:8081/foglamp/schedule/process/purge%2Cbackup%2Cstats%20collector
"""
scheduled_process_names = request.match_info.get('scheduled_process_name', None)
scheduled_process_name = scheduled_process_names.split(',')
payload = PayloadBuilder().SELECT("name").WHERE(["name", "in", scheduled_process_name]).payload()
_storage = connect.get_storage_async()
scheduled_process = await _storage.query_tbl_with_payload('scheduled_processes', payload)
if len(scheduled_process['rows']) == 0:
raise web.HTTPNotFound(reason='No such Scheduled Process: {}.'.format(scheduled_process_name))
if len(scheduled_process['rows']) == 1:
retval = scheduled_process['rows'][0].get("name")
else:
retval = scheduled_process['rows']
return web.json_response(retval)
#################################
# Schedules
#################################
def _extract_args(data, curr_value):
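    # Build a normalized _schedule dict: values from the request payload take precedence,
    # falling back to the existing schedule's values (curr_value) and then to defaults.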
try:
if 'type' in data and (not isinstance(data['type'], int) and not data['type'].isdigit()):
raise ValueError('Error in type: {}'.format(data['type']))
if 'day' in data:
if isinstance(data['day'], float) or (isinstance(data['day'], str) and (data['day'].strip() != "" and not data['day'].isdigit())):
raise ValueError('Error in day: {}'.format(data['day']))
if 'time' in data and (not isinstance(data['time'], int) and not data['time'].isdigit()):
raise ValueError('Error in time: {}'.format(data['time']))
if 'repeat' in data and (not isinstance(data['repeat'], int) and not data['repeat'].isdigit()):
raise ValueError('Error in repeat: {}'.format(data['repeat']))
_schedule = dict()
_schedule['schedule_id'] = curr_value['schedule_id'] if curr_value else None
s_type = data.get('type') if 'type' in data else curr_value['schedule_type'] if curr_value else 0
_schedule['schedule_type'] = int(s_type)
s_day = data.get('day') if 'day' in data else curr_value['schedule_day'] if curr_value and curr_value[
'schedule_day'] else None
_schedule['schedule_day'] = int(s_day) if s_day is not None and (
isinstance(s_day, int) or (not isinstance(s_day, int) and s_day.isdigit())) else None
s_time = data.get('time') if 'time' in data else curr_value['schedule_time'] if curr_value and curr_value[
'schedule_time'] else 0
_schedule['schedule_time'] = int(s_time)
s_repeat = data.get('repeat') if 'repeat' in data else curr_value['schedule_repeat'] if curr_value and \
curr_value[
'schedule_repeat'] else 0
_schedule['schedule_repeat'] = int(s_repeat)
_schedule['schedule_name'] = data.get('name') if 'name' in data else curr_value[
'schedule_name'] if curr_value else None
_schedule['schedule_process_name'] = data.get('process_name') if 'process_name' in data else curr_value[
'schedule_process_name'] if curr_value else None
_schedule['schedule_exclusive'] = data.get('exclusive') if 'exclusive' in data else curr_value[
'schedule_exclusive'] if curr_value else 'True'
_schedule['schedule_exclusive'] = 'True' if (
(type(_schedule['schedule_exclusive']) is str and _schedule['schedule_exclusive'].lower() in ['t', 'true']) or (
(type(_schedule['schedule_exclusive']) is bool and _schedule['schedule_exclusive'] is True))) else 'False'
_schedule['schedule_enabled'] = data.get('enabled') if 'enabled' in data else curr_value[
'schedule_enabled'] if curr_value else 'True'
_schedule['schedule_enabled'] = 'True' if (
(type(_schedule['schedule_enabled']) is str and _schedule['schedule_enabled'].lower() in ['t', 'true']) or (
(type(_schedule['schedule_enabled']) is bool and _schedule['schedule_enabled'] is True))) else 'False'
_schedule['is_enabled_modified'] = None
if 'enabled' in data:
_schedule['is_enabled_modified'] = True if _schedule['schedule_enabled'] == 'True' else False
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
return _schedule
async def _check_schedule_post_parameters(data, curr_value=None):
"""
Private method to validate post data for creating a new schedule or updating an existing schedule
Args:
data:
Returns:
            a list of validation error messages (empty when the data is valid)
"""
_schedule = _extract_args(data, curr_value)
_errors = list()
# Raise error if schedule_type is missing for a new schedule
if 'schedule_id' not in _schedule and not _schedule.get('schedule_type'):
_errors.append('Schedule type cannot be empty.')
# Raise error if schedule_type is wrong
if _schedule.get('schedule_type') not in list(Schedule.Type):
_errors.append('Schedule type error: {}'.format(_schedule.get('schedule_type')))
# Raise error if day and time are missing for schedule_type = TIMED
if _schedule.get('schedule_type') == Schedule.Type.TIMED:
if not _schedule.get('schedule_time'):
_errors.append('Schedule time cannot be empty for TIMED schedule.')
if _schedule.get('schedule_day') is not None and (not isinstance(_schedule.get('schedule_day'), int) or (
_schedule.get('schedule_day') < 1 or _schedule.get('schedule_day') > 7)):
_errors.append('Day must either be None or must be an integer and in range 1-7.')
if not isinstance(_schedule.get('schedule_time'), int) or (
_schedule.get('schedule_time') < 0 or _schedule.get('schedule_time') > 86399):
_errors.append('Time must be an integer and in range 0-86399.')
    # Raise error if repeat is missing or is not an integer
if _schedule.get('schedule_type') == Schedule.Type.INTERVAL:
if 'schedule_repeat' not in _schedule:
_errors.append('Repeat is required for INTERVAL Schedule type.')
elif not isinstance(_schedule.get('schedule_repeat'), int):
_errors.append('Repeat must be an integer.')
    # Raise error if day is not an integer
if _schedule.get('schedule_day') is not None and not isinstance(_schedule.get('schedule_day'), int):
_errors.append('Day must either be None or must be an integer.')
    # Raise error if time is not an integer
if not isinstance(_schedule.get('schedule_time'), int):
_errors.append('Time must be an integer.')
    # Raise error if repeat is not an integer
if not isinstance(_schedule.get('schedule_repeat'), int):
_errors.append('Repeat must be an integer.')
# Raise error if name and process_name are missing for a new schedule
if not _schedule.get('schedule_name') or not _schedule.get('schedule_process_name'):
_errors.append('Schedule name and Process name cannot be empty.')
# Raise error if scheduled_process name is wrong
payload = PayloadBuilder().SELECT("name").WHERE(["name", "=", _schedule.get('schedule_process_name')]).payload()
_storage = connect.get_storage_async()
scheduled_process = await _storage.query_tbl_with_payload('scheduled_processes', payload)
if len(scheduled_process['rows']) == 0:
raise ScheduleProcessNameNotFoundError('No such Scheduled Process name: {}'.format(_schedule.get('schedule_process_name')))
# Raise error if exclusive is wrong
if _schedule.get('schedule_exclusive') not in ['True', 'False']:
_errors.append('Schedule exclusive error: {}'.format(_schedule.get('schedule_exclusive')))
# Raise error if enabled is wrong
if _schedule.get('schedule_enabled') not in ['True', 'False']:
_errors.append('Schedule enabled error: {}'.format(_schedule.get('schedule_enabled')))
return _errors
async def _execute_add_update_schedule(data, curr_value=None):
"""
    Private method shared by creating a new schedule and updating an existing schedule
Args:
data:
Returns:
schedule_id (new for created, existing for updated)
"""
_schedule = _extract_args(data, curr_value)
# Create schedule object as Scheduler.save_schedule requires an object
if _schedule.get('schedule_type') == Schedule.Type.STARTUP:
schedule = StartUpSchedule()
elif _schedule.get('schedule_type') == Schedule.Type.TIMED:
schedule = TimedSchedule()
schedule.day = _schedule.get('schedule_day')
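        # schedule_time is stored as seconds after midnight; convert it to h/m/s for datetime.time()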
m, s = divmod(_schedule.get('schedule_time'), 60)
h, m = divmod(m, 60)
schedule.time = datetime.time().replace(hour=h, minute=m, second=s)
elif _schedule.get('schedule_type') == Schedule.Type.INTERVAL:
schedule = IntervalSchedule()
elif _schedule.get('schedule_type') == Schedule.Type.MANUAL:
schedule = ManualSchedule()
    # Populate the schedule object
schedule.schedule_id = _schedule.get('schedule_id')
schedule.name = _schedule.get('schedule_name')
schedule.process_name = _schedule.get('schedule_process_name')
schedule.repeat = datetime.timedelta(seconds=_schedule['schedule_repeat'])
schedule.exclusive = True if _schedule.get('schedule_exclusive') == 'True' else False
schedule.enabled = True if _schedule.get('schedule_enabled') == 'True' else False
# Save schedule
await server.Server.scheduler.save_schedule(schedule, _schedule['is_enabled_modified'])
updated_schedule_id = schedule.schedule_id
return updated_schedule_id
async def get_schedules(request):
"""
Returns:
a list of all the defined schedules from schedules table
:Example:
curl -X GET http://localhost:8081/foglamp/schedule
"""
schedule_list = await server.Server.scheduler.get_schedules()
schedules = []
for sch in schedule_list:
schedules.append({
'id': str(sch.schedule_id),
'name': sch.name,
'processName': sch.process_name,
'type': Schedule.Type(int(sch.schedule_type)).name,
'repeat': sch.repeat.total_seconds() if sch.repeat else 0,
'time': (sch.time.hour * 60 * 60 + sch.time.minute * 60 + sch.time.second) if sch.time else 0,
'day': sch.day,
'exclusive': sch.exclusive,
'enabled': sch.enabled
})
return web.json_response({'schedules': schedules})
async def get_schedule(request):
"""
Returns:
the information for the given schedule from schedules table
:Example:
curl -X GET http://localhost:8081/foglamp/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0
"""
try:
schedule_id = request.match_info.get('schedule_id', None)
try:
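            # uuid.UUID() raises ValueError for a malformed id, which is translated into a 404 below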
assert uuid.UUID(schedule_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Schedule ID {}".format(schedule_id))
sch = await server.Server.scheduler.get_schedule(uuid.UUID(schedule_id))
schedule = {
'id': str(sch.schedule_id),
'name': sch.name,
"processName": sch.process_name,
'type': Schedule.Type(int(sch.schedule_type)).name,
'repeat': sch.repeat.total_seconds() if sch.repeat else 0,
'time': (sch.time.hour * 60 * 60 + sch.time.minute * 60 + sch.time.second) if sch.time else 0,
'day': sch.day,
'exclusive': sch.exclusive,
'enabled': sch.enabled
}
return web.json_response(schedule)
except (ValueError, ScheduleNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def enable_schedule_with_name(request):
""" Enables the schedule for given schedule_name or schedule_id in request payload
curl -X PUT http://localhost:8081/foglamp/schedule/enable -d '{"schedule_name": "a schedule name"}'
:param request: {"schedule_name": "sinusoid"} or {"schedule_id": "uuid of schedule"}
:return:
"""
try:
data = await request.json()
sch_name = data.get('schedule_name', None)
sch_id = data.get('schedule_id', None)
if not sch_name and not sch_id:
raise web.HTTPBadRequest(reason='Schedule name or ID is required')
if sch_name and not sch_id:
storage_client = connect.get_storage_async()
payload = PayloadBuilder().SELECT("id").WHERE(['schedule_name', '=', sch_name]).payload()
result = await storage_client.query_tbl_with_payload('schedules', payload)
            if int(result['count']):
                sch_id = result['rows'][0]['id']
            else:
                raise web.HTTPNotFound(reason="No Schedule with name {}".format(sch_name))
if sch_id:
try:
assert uuid.UUID(sch_id)
except (TypeError, ValueError):
raise web.HTTPNotFound(reason="No Schedule with ID {}".format(sch_id))
status, reason = await server.Server.scheduler.enable_schedule(uuid.UUID(sch_id))
schedule = {
'scheduleId': sch_id,
'status': status,
'message': reason
}
except (KeyError, ValueError, ScheduleNotFoundError) as e:
raise web.HTTPNotFound(reason=str(e))
else:
return web.json_response(schedule)
async def disable_schedule_with_name(request):
""" Disable the schedule for given schedule_name or schedule_id in request payload
curl -X PUT http://localhost:8081/foglamp/schedule/disable -d '{"schedule_name": "a schedule name"}'
:param request: {"schedule_name": "sinusoid"} or {"schedule_id": "uuid of schedule"}
:return:
"""
try:
data = await request.json()
sch_name = data.get('schedule_name', None)
sch_id = data.get('schedule_id', None)
if not sch_name and not sch_id:
raise web.HTTPBadRequest(reason='Schedule name or ID is required')
if sch_name and not sch_id:
storage_client = connect.get_storage_async()
payload = PayloadBuilder().SELECT("id").WHERE(['schedule_name', '=', sch_name]).payload()
result = await storage_client.query_tbl_with_payload('schedules', payload)
            if int(result['count']):
                sch_id = result['rows'][0]['id']
            else:
                raise web.HTTPNotFound(reason="No Schedule with name {}".format(sch_name))
if sch_id:
try:
assert uuid.UUID(sch_id)
except (TypeError, ValueError):
raise web.HTTPNotFound(reason="No Schedule with ID {}".format(sch_id))
status, reason = await server.Server.scheduler.disable_schedule(uuid.UUID(sch_id))
schedule = {
'scheduleId': sch_id,
'status': status,
'message': reason
}
except (KeyError, ValueError, ScheduleNotFoundError) as e:
raise web.HTTPNotFound(reason=str(e))
else:
return web.json_response(schedule)
async def enable_schedule(request):
"""
Enable the given schedule from schedules table
:Example:
curl -X PUT http://localhost:8081/foglamp/schedule/ac6dd55d-f55d-44f7-8741-984604bf2384/enable
"""
try:
schedule_id = request.match_info.get('schedule_id', None)
try:
assert uuid.UUID(schedule_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Schedule ID {}".format(schedule_id))
status, reason = await server.Server.scheduler.enable_schedule(uuid.UUID(schedule_id))
schedule = {
'scheduleId': schedule_id,
'status': status,
'message': reason
}
return web.json_response(schedule)
except (ValueError, ScheduleNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def disable_schedule(request):
"""
Disable the given schedule from schedules table
:Example:
curl -X PUT http://localhost:8081/foglamp/schedule/ac6dd55d-f55d-44f7-8741-984604bf2384/disable
"""
try:
schedule_id = request.match_info.get('schedule_id', None)
try:
assert uuid.UUID(schedule_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Schedule ID {}".format(schedule_id))
status, reason = await server.Server.scheduler.disable_schedule(uuid.UUID(schedule_id))
schedule = {
'scheduleId': schedule_id,
'status': status,
'message': reason
}
return web.json_response(schedule)
except (ValueError, ScheduleNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def start_schedule(request):
"""
Starts a given schedule
:Example:
curl -X POST http://localhost:8081/foglamp/schedule/start/fd439e5b-86ba-499a-86d3-34a6e5754b5a
"""
try:
schedule_id = request.match_info.get('schedule_id', None)
try:
assert uuid.UUID(schedule_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Schedule ID {}".format(schedule_id))
await server.Server.scheduler.get_schedule(uuid.UUID(schedule_id))
# Start schedule
resp = await server.Server.scheduler.queue_task(uuid.UUID(schedule_id))
if resp is True:
return web.json_response({'id': schedule_id, 'message': 'Schedule started successfully'})
else:
return web.json_response({'id': schedule_id, 'message': 'Schedule could not be started'})
except (ValueError, ScheduleNotFoundError, NotReadyError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def post_schedule(request):
"""
Create a new schedule in schedules table
:Example:
curl -d '{"type": 3, "name": "sleep30test", "process_name": "sleep30", "repeat": "45"}' -X POST http://localhost:8081/foglamp/schedule
"""
try:
data = await request.json()
schedule_id = data.get('schedule_id', None)
if schedule_id:
raise web.HTTPBadRequest(reason='Schedule ID not needed for new Schedule.')
go_no_go = await _check_schedule_post_parameters(data)
if len(go_no_go) != 0:
raise ValueError("Errors in request: {} {}".format(','.join(go_no_go), len(go_no_go)))
updated_schedule_id = await _execute_add_update_schedule(data)
sch = await server.Server.scheduler.get_schedule(updated_schedule_id)
schedule = {
'id': str(sch.schedule_id),
'name': sch.name,
"processName": sch.process_name,
'type': Schedule.Type(int(sch.schedule_type)).name,
'repeat': sch.repeat.total_seconds() if sch.repeat else 0,
'time': (sch.time.hour * 60 * 60 + sch.time.minute * 60 + sch.time.second) if sch.time else 0,
'day': sch.day,
'exclusive': sch.exclusive,
'enabled': sch.enabled
}
return web.json_response({'schedule': schedule})
except (ScheduleNotFoundError, ScheduleProcessNameNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
async def update_schedule(request):
"""
Update a schedule in schedules table
:Example:
curl -d '{"type": 4, "name": "sleep30 updated", "process_name": "sleep30", "repeat": "15"}' -X PUT http://localhost:8081/foglamp/schedule/84fe4ea1-df9c-4c87-bb78-cab2e7d5d2cc
"""
try:
data = await request.json()
schedule_id = request.match_info.get('schedule_id', None)
try:
assert uuid.UUID(schedule_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Schedule ID {}".format(schedule_id))
sch = await server.Server.scheduler.get_schedule(uuid.UUID(schedule_id))
if not sch:
raise ScheduleNotFoundError(schedule_id)
curr_value = dict()
curr_value['schedule_id'] = sch.schedule_id
curr_value['schedule_process_name'] = sch.process_name
curr_value['schedule_name'] = sch.name
curr_value['schedule_type'] = sch.schedule_type
curr_value['schedule_repeat'] = sch.repeat.total_seconds() if sch.repeat else 0
curr_value['schedule_time'] = (sch.time.hour * 60 * 60 + sch.time.minute * 60 + sch.time.second) if sch.time else 0
curr_value['schedule_day'] = sch.day
curr_value['schedule_exclusive'] = sch.exclusive
curr_value['schedule_enabled'] = sch.enabled
go_no_go = await _check_schedule_post_parameters(data, curr_value)
if len(go_no_go) != 0:
raise ValueError("Errors in request: {}".format(','.join(go_no_go)))
updated_schedule_id = await _execute_add_update_schedule(data, curr_value)
sch = await server.Server.scheduler.get_schedule(updated_schedule_id)
schedule = {
'id': str(sch.schedule_id),
'name': sch.name,
"processName": sch.process_name,
'type': Schedule.Type(int(sch.schedule_type)).name,
'repeat': sch.repeat.total_seconds() if sch.repeat else 0,
'time': (sch.time.hour * 60 * 60 + sch.time.minute * 60 + sch.time.second) if sch.time else 0,
'day': sch.day,
'exclusive': sch.exclusive,
'enabled': sch.enabled
}
return web.json_response({'schedule': schedule})
except (ScheduleNotFoundError, ScheduleProcessNameNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
async def delete_schedule(request):
"""
Delete a schedule from schedules table
:Example:
curl -X DELETE http://localhost:8081/foglamp/schedule/dc9bfc01-066a-4cc0-b068-9c35486db87f
"""
try:
schedule_id = request.match_info.get('schedule_id', None)
try:
assert uuid.UUID(schedule_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Schedule ID {}".format(schedule_id))
retval, message = await server.Server.scheduler.delete_schedule(uuid.UUID(schedule_id))
return web.json_response({'message': message, 'id': schedule_id})
except RuntimeWarning:
raise web.HTTPConflict(reason="Enabled Schedule {} cannot be deleted.".format(schedule_id))
except (ValueError, ScheduleNotFoundError, NotReadyError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def get_schedule_type(request):
"""
Args:
request:
Returns:
an array of Schedule type enumeration key index values
:Example:
curl -X GET http://localhost:8081/foglamp/schedule/type
"""
results = []
for _type in Schedule.Type:
data = {'index': _type.value, 'name': _type.name}
results.append(data)
return web.json_response({'scheduleType': results})
#################################
# Tasks
#################################
async def get_task(request):
"""
Returns:
the details of the task for the given task_id
:Example:
curl -X GET http://localhost:8081/foglamp/task/{task_id}
"""
try:
task_id = request.match_info.get('task_id', None)
try:
assert uuid.UUID(task_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Task ID {}".format(task_id))
tsk = await server.Server.scheduler.get_task(task_id)
task = {
'id': str(tsk.task_id),
'name': tsk.schedule_name,
'processName': tsk.process_name,
'state': Task.State(int(tsk.state)).name.capitalize(),
'startTime': str(tsk.start_time),
'endTime': str(tsk.end_time),
'exitCode': tsk.exit_code,
'reason': tsk.reason
}
return web.json_response(task)
except (ValueError, TaskNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def get_tasks(request):
"""
Returns:
the list of tasks
:Example:
curl -X GET http://localhost:8081/foglamp/task
curl -X GET http://localhost:8081/foglamp/task?limit=2
curl -X GET http://localhost:8081/foglamp/task?name=xxx
curl -X GET http://localhost:8081/foglamp/task?state=xxx
curl -X GET http://localhost:8081/foglamp/task?name=xxx&state=xxx
"""
try:
limit = __DEFAULT_LIMIT
if 'limit' in request.query and request.query['limit'] != '':
try:
limit = int(request.query['limit'])
if limit < 0:
raise ValueError
except ValueError:
raise web.HTTPBadRequest(reason="Limit must be a positive integer")
name = None
if 'name' in request.query and request.query['name'] != '':
name = request.query['name']
state = None
if 'state' in request.query and request.query['state'] != '':
try:
state = Task.State[request.query['state'].upper()].value
except KeyError as ex:
raise web.HTTPBadRequest(reason="This state value {} not permitted.".format(ex))
where_clause = None
if name and state:
where_clause = (["schedule_name", "=", name], ["state", "=", state])
elif name:
where_clause = ["schedule_name", "=", name]
elif state:
where_clause = ["state", "=", state]
tasks = await server.Server.scheduler.get_tasks(where=where_clause, limit=limit)
if len(tasks) == 0:
raise web.HTTPNotFound(reason="No Tasks found")
new_tasks = []
for task in tasks:
new_tasks.append(
{'id': str(task.task_id),
'name': task.schedule_name,
'processName': task.process_name,
'state': Task.State(int(task.state)).name.capitalize(),
'startTime': str(task.start_time),
'endTime': str(task.end_time),
'exitCode': task.exit_code,
'reason': task.reason
}
)
return web.json_response({'tasks': new_tasks})
except (ValueError, TaskNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def get_tasks_latest(request):
"""
Returns:
the list of the most recent task execution for each name from tasks table
:Example:
curl -X GET http://localhost:8081/foglamp/task/latest
curl -X GET http://localhost:8081/foglamp/task/latest?name=xxx
"""
payload = PayloadBuilder().SELECT("id", "schedule_name", "process_name", "state", "start_time", "end_time", "reason", "pid", "exit_code")\
.ALIAS("return", ("start_time", 'start_time'), ("end_time", 'end_time'))\
.FORMAT("return", ("start_time", "YYYY-MM-DD HH24:MI:SS.MS"), ("end_time", "YYYY-MM-DD HH24:MI:SS.MS"))\
.ORDER_BY(["schedule_name", "asc"], ["start_time", "desc"])
if 'name' in request.query and request.query['name'] != '':
name = request.query['name']
payload.WHERE(["schedule_name", "=", name])
try:
_storage = connect.get_storage_async()
results = await _storage.query_tbl_with_payload('tasks', payload.payload())
if len(results['rows']) == 0:
raise web.HTTPNotFound(reason="No Tasks found")
tasks = []
previous_schedule = None
for row in results['rows']:
if not row['schedule_name'].strip():
continue
if previous_schedule != row['schedule_name']:
tasks.append(row)
previous_schedule = row['schedule_name']
new_tasks = []
for task in tasks:
new_tasks.append(
{'id': str(task['id']),
'name': task['schedule_name'],
'processName': task['process_name'],
'state': [t.name.capitalize() for t in list(Task.State)][int(task['state']) - 1],
'startTime': str(task['start_time']),
'endTime': str(task['end_time']),
'exitCode': task['exit_code'],
'reason': task['reason'],
'pid': task['pid']
}
)
return web.json_response({'tasks': new_tasks})
except (ValueError, TaskNotFoundError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def cancel_task(request):
"""Cancel a running task from tasks table
:Example:
curl -X PUT http://localhost:8081/foglamp/task/{task_id}/cancel
"""
try:
task_id = request.match_info.get('task_id', None)
try:
assert uuid.UUID(task_id)
except ValueError as ex:
raise web.HTTPNotFound(reason="Invalid Task ID {}".format(task_id))
await server.Server.scheduler.get_task(task_id)
# Cancel Task
await server.Server.scheduler.cancel_task(uuid.UUID(task_id))
return web.json_response({'id': task_id, 'message': 'Task cancelled successfully'})
except (ValueError, TaskNotFoundError, TaskNotRunningError) as ex:
raise web.HTTPNotFound(reason=str(ex))
async def get_task_state(request):
"""
Returns:
an array of Task State enumeration key index values
:Example:
curl -X GET http://localhost:8081/foglamp/task/state
"""
results = []
for _state in Task.State:
data = {'index': _state.value, 'name': _state.name.capitalize()}
results.append(data)
return web.json_response({'taskState': results})
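# Illustrative sketch (added for clarity, not part of the original FogLAMP module): one way the
# handlers above could be attached to an aiohttp application. The paths mirror the curl examples
# in the docstrings; FogLAMP's actual server registers its routes elsewhere.
def _example_setup_routes(app):
    app.router.add_route('PUT', '/foglamp/schedule/{schedule_id}/enable', enable_schedule)
    app.router.add_route('PUT', '/foglamp/schedule/{schedule_id}/disable', disable_schedule)
    app.router.add_route('POST', '/foglamp/schedule/start/{schedule_id}', start_schedule)
    app.router.add_route('DELETE', '/foglamp/schedule/{schedule_id}', delete_schedule)
    app.router.add_route('GET', '/foglamp/task', get_tasks)
    app.router.add_route('GET', '/foglamp/task/state', get_task_state)
    app.router.add_route('PUT', '/foglamp/task/{task_id}/cancel', cancel_task)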
|
|
# License: BSD 3 clause
from warnings import warn
import numpy as np
from tick.base import Base
from tick.base_model import ModelLipschitz
from .learner_optim import LearnerOptim
class LearnerGLM(LearnerOptim):
"""Learner for a Generalized Linear Model (GML).
Not intended for end-users, but for development only.
It should be scikit-learn compliant
Parameters
----------
C : `float`, default=1e3
Level of penalization
penalty : 'none', 'l1', 'l2', 'elasticnet', 'tv', 'binarsity', default='l2'
The penalization to use. Default 'l2', namely ridge penalization.
solver : 'gd', 'agd', 'bfgs', 'svrg', 'sdca'
The name of the solver to use
fit_intercept : `bool`, default=True
If `True`, include an intercept in the model
warm_start : `bool`, default=False
If true, learning will start from the last reached solution
step : `float`, default=None
Initial step size used for learning. Used in 'gd', 'agd', 'sgd'
and 'svrg' solvers
tol : `float`, default=1e-5
The tolerance of the solver (iterations stop when the stopping
criterion is below it). By default the solver does ``max_iter``
iterations
max_iter : `int`, default=100
Maximum number of iterations of the solver
verbose : `bool`, default=True
If `True`, solver progress is printed; otherwise the solver does not
print anything (but still records information in its history)
print_every : `int`, default=10
Print history information when ``n_iter`` (iteration number) is
a multiple of ``print_every``
record_every : `int`, default=10
Record history information when ``n_iter`` (iteration number) is
a multiple of ``record_every``
Other Parameters
----------------
sdca_ridge_strength : `float`, default=1e-3
It controls the strength of the additional ridge penalization. Used in
'sdca' solver
elastic_net_ratio : `float`, default=0.95
Ratio of elastic net mixing parameter with 0 <= ratio <= 1.
For ratio = 0 this is ridge (L2 squared) regularization
For ratio = 1 this is lasso (L1) regularization
For 0 < ratio < 1, the regularization is a linear combination
of L1 and L2.
Used in 'elasticnet' penalty
random_state : int seed, RandomState instance, or None (default)
The seed that will be used by stochastic solvers. Used in 'sgd',
'svrg', and 'sdca' solvers
blocks_start : `numpy.array`, shape=(n_features,), default=None
The indices of the first column of each binarized feature blocks. It
corresponds to the ``feature_indices`` property of the
``FeaturesBinarizer`` preprocessing.
Used in 'binarsity' penalty
blocks_length : `numpy.array`, shape=(n_features,), default=None
The length of each binarized feature blocks. It corresponds to the
``n_values`` property of the ``FeaturesBinarizer`` preprocessing.
Used in 'binarsity' penalty
Attributes
----------
weights : np.array, shape=(n_features,)
The learned weights of the model (not including the intercept)
intercept : `float` or None
The intercept, if ``fit_intercept=True``, otherwise `None`
"""
_attrinfos = {
"_fit_intercept": {
"writable": False
},
"weights": {
"writable": False
},
"intercept": {
"writable": False
},
}
def __init__(self, fit_intercept=True, penalty='l2', C=1e3, solver="svrg",
step=None, tol=1e-5, max_iter=100, verbose=True,
warm_start=False, print_every=10, record_every=10,
sdca_ridge_strength=1e-3, elastic_net_ratio=0.95,
random_state=None, blocks_start=None, blocks_length=None):
extra_model_kwargs = {'fit_intercept': fit_intercept}
LearnerOptim.__init__(
self, penalty=penalty, C=C, solver=solver, step=step, tol=tol,
max_iter=max_iter, verbose=verbose, warm_start=warm_start,
print_every=print_every, record_every=record_every,
sdca_ridge_strength=sdca_ridge_strength,
elastic_net_ratio=elastic_net_ratio, random_state=random_state,
extra_model_kwargs=extra_model_kwargs, blocks_start=blocks_start,
blocks_length=blocks_length)
self.fit_intercept = fit_intercept
self.weights = None
self.intercept = None
@property
def fit_intercept(self):
return self._model_obj.fit_intercept
@fit_intercept.setter
def fit_intercept(self, val: bool):
self._model_obj.fit_intercept = val
def fit(self, X: object, y: np.array):
"""
Fit the model according to the given training data.
Parameters
----------
X : `np.ndarray` or `scipy.sparse.csr_matrix`, shape=(n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : `np.array`, shape=(n_samples,)
Target vector relative to X.
Returns
-------
self : LearnerGLM
The fitted instance of the model
"""
solver_obj = self._solver_obj
model_obj = self._model_obj
prox_obj = self._prox_obj
fit_intercept = self.fit_intercept
# Pass the data to the model
model_obj.fit(X, y)
if self.step is None and self.solver in self._solvers_with_step:
if self.solver in self._solvers_with_linesearch:
self._solver_obj.linesearch = True
elif self.solver == 'svrg':
if isinstance(self._model_obj, ModelLipschitz):
self.step = 1. / self._model_obj.get_lip_max()
else:
warn('SVRG step needs to be tuned manually',
RuntimeWarning)
self.step = 1.
elif self.solver == 'sgd':
warn('SGD step needs to be tuned manually', RuntimeWarning)
self.step = 1.
# Determine the range of the prox
# A custom prox range cannot be specified when using learners
if fit_intercept:
# Don't penalize the intercept (intercept is the last coeff)
prox_obj.range = (0, model_obj.n_coeffs - 1)
else:
prox_obj.range = (0, model_obj.n_coeffs)
# Now, we can pass the model and prox objects to the solver
solver_obj.set_model(model_obj).set_prox(prox_obj)
coeffs_start = None
if self.warm_start and self.weights is not None:
if self.fit_intercept and self.intercept is not None:
coeffs = np.hstack((self.weights, self.intercept))
else:
coeffs = self.weights
# ensure starting point has the right format
if coeffs is not None and coeffs.shape == (model_obj.n_coeffs,):
coeffs_start = coeffs
else:
raise ValueError('Cannot warm start, coeffs don\'t have the '
'right shape')
# Launch the solver
coeffs = solver_obj.solve(coeffs_start)
# Get the learned coefficients
if fit_intercept:
self._set("weights", coeffs[:-1])
self._set("intercept", coeffs[-1])
else:
self._set("weights", coeffs)
self._set("intercept", None)
self._set("_fitted", True)
return self
def get_params(self):
"""
Get parameters for this estimator.
Returns
-------
params : `dict`
Parameter names mapped to their values.
"""
dd = {
'fit_intercept': self.fit_intercept,
'penalty': self.penalty,
'C': self.C,
'solver': self.solver,
'step': self.step,
'tol': self.tol,
'max_iter': self.max_iter,
'verbose': self.verbose,
'warm_start': self.warm_start,
'print_every': self.print_every,
'record_every': self.record_every,
'sdca_ridge_strength': self.sdca_ridge_strength,
'elastic_net_ratio': self.elastic_net_ratio,
'random_state': self.random_state,
'blocks_start': self.blocks_start,
'blocks_length': self.blocks_length,
}
return dd
def set_params(self, **kwargs):
"""
Set the parameters for this learner.
Parameters
----------
**kwargs :
Named arguments to update in the learner
Returns
-------
output : `LearnerGLM`
self with updated parameters
"""
for key, val in kwargs.items():
setattr(self, key, val)
return self
def _as_dict(self):
dd = Base._as_dict(self)
dd.pop("intercept", None)
dd.pop("weights", None)
return dd
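# Illustrative sketch (added for clarity, not part of tick): the scikit-learn style parameter
# round trip exposed by concrete learners built on LearnerGLM. `learner_cls` stands for a
# hypothetical subclass that supplies the model and prox objects; the calls below rely only on
# the methods defined above.
def _example_param_roundtrip(learner_cls):
    learner = learner_cls(penalty='elasticnet', C=1e2, elastic_net_ratio=0.5)
    params = learner.get_params()            # plain dict of constructor arguments
    learner.set_params(C=1e3, max_iter=200)  # setattr-based update; returns self for chaining
    return params, learner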
|
|
# proc: Simple interface to Linux process information.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: April 26, 2020
# URL: https://proc.readthedocs.io
"""
The :mod:`proc.cron` module implements graceful termination of cron_.
.. contents::
:local:
Introduction to cron
====================
The cron_ daemon is ubiquitous on Linux (UNIX) systems. It's responsible for
executing user defined "jobs" at fixed times, dates or intervals. It's used for
system maintenance tasks, periodic monitoring, production job servers for IT
companies around the world, etc.
Problem statement
=================
One thing that has always bothered me about cron_ is that there is no simple
and robust way to stop cron and wait for all running cron jobs to finish what
they were doing. You might be wondering why that would be useful...
Imagine you have to perform disruptive system maintenance on a job server
that's responsible for running dozens or even hundreds of important cron jobs.
Of course you can just run ``sudo service cron stop`` to stop cron from
starting new cron jobs, but what do you do about cron jobs that have already
started and are still running? Some options:
1. You just don't care and start your disruptive maintenance. In this case you
can stop reading because what I'm proposing won't interest you! :-)
2. You stare at an interactive process monitor like top_, htop_, etc. until
everything that looks like a cron job has disappeared from the screen. Good
for you for being diligent about your work, but this is not a nice task to
perform! Imagine you have to do it on a handful of job servers before
starting disruptive maintenance on shared infrastructure like a central
database server...
3. You automate your work with shell scripts or one-liners_ that involve
grepping_ the output of ps_ or similar gymnastics that work most of the time
but not quite always... (hi me from a few years ago! :-)
Of course there are dozens (hundreds?) of alternative job schedulers that could
make things easier but the thing is that cron_ is already here and widely used,
so migrating a handful of job servers with hundreds of jobs could be way more
work than it's ever going to be worth...
A robust solution: ``cron-graceful``
====================================
The :mod:`proc.cron` module implements the command line program
``cron-graceful`` which gracefully stops cron daemons. This module builds on
top of the :mod:`proc.tree` module as a demonstration of the possibilities
of the `proc` package and as a practical tool that is ready to be used on any
Linux system that has Python and cron_ installed.
The following command prints a usage message:
.. code-block:: sh
$ cron-graceful --help
To use the program you simply run it with super user privileges:
.. code-block:: sh
$ sudo cron-graceful
Internal documentation of :mod:`proc.cron`
==========================================
.. _cron: http://en.wikipedia.org/wiki/Cron
.. _grepping: http://en.wikipedia.org/wiki/Grep#Usage_as_a_verb
.. _htop: http://en.wikipedia.org/wiki/Htop
.. _one-liners: http://en.wikipedia.org/wiki/One-liner_program
.. _ps: http://en.wikipedia.org/wiki/Ps_(Unix)
.. _top: http://en.wikipedia.org/wiki/Top_(software)
"""
# Standard library modules.
import functools
import getopt
import logging
import os
import sys
# External dependencies.
import coloredlogs
from executor import ExternalCommandFailed, execute, quote, which
from humanfriendly import Timer, format_timespan
from humanfriendly.terminal import usage, warning
from humanfriendly.terminal.spinners import Spinner
from humanfriendly.text import concatenate, pluralize
# Modules provided by our package.
from proc.core import sorted_by_pid
from proc.tree import get_process_tree
# Public identifiers that require documentation.
__all__ = (
'ADDITIONS_SCRIPT_NAME',
'CronDaemonNotRunning',
'USAGE_TEXT',
'cron_graceful',
'ensure_root_privileges',
'find_cron_daemon',
'logger',
'main',
'parse_arguments',
'run_additions',
'terminate_cron_daemon',
'wait_for_processes',
)
# Initialize a logger.
logger = logging.getLogger(__name__)
# Inject our logger into all execute() calls.
execute = functools.partial(execute, logger=logger)
ADDITIONS_SCRIPT_NAME = 'cron-graceful-additions'
"""
The name of the external command that's run by ``cron-graceful`` (a string).
Refer to :func:`run_additions()` for details about how
:data:`ADDITIONS_SCRIPT_NAME` is used.
"""
USAGE_TEXT = """
Usage: cron-graceful [OPTIONS]
Gracefully stop the cron job scheduler by waiting for all running cron
jobs to finish. The cron-graceful program works as follows:
1. Identify the cron daemon process and send it a SIGSTOP signal to
prevent it from scheduling new cron jobs without killing it.
2. Identify the currently running cron jobs by navigating the process
tree (if the cron daemon process had been killed in step one this
wouldn't be possible) and wait for the cron jobs to finish.
3. Terminate the cron daemon process (because we've already identified
the running cron jobs and no new cron jobs can be scheduled we no
longer need the daemon).
If a command named `cron-graceful-additions' exists in the $PATH it
will be executed between steps one and two. This allows you to inject
custom logic into the graceful shutdown process. If the command fails a
warning will be logged but the cron-graceful program will continue.
Supported options:
-n, --dry-run
Don't make any changes (doesn't require root access).
-v, --verbose
Make more noise (increase verbosity).
-q, --quiet
Make less noise (decrease verbosity).
-h, --help
Show this message and exit.
"""
def main():
"""Wrapper for :func:`cron_graceful()` that feeds it :data:`sys.argv`."""
coloredlogs.install(syslog=True)
cron_graceful(sys.argv[1:])
def cron_graceful(arguments):
"""Command line interface for the ``cron-graceful`` program."""
runtime_timer = Timer()
# Initialize logging to the terminal.
dry_run = parse_arguments(arguments)
if not dry_run:
ensure_root_privileges()
try:
cron_daemon = find_cron_daemon()
except CronDaemonNotRunning:
logger.info("No running cron daemon found, assuming it was previously stopped ..")
else:
if not dry_run:
# Prevent the cron daemon from starting new cron jobs.
cron_daemon.suspend()
# Enable user defined additional logic.
run_additions()
# Identify the running cron jobs based on the process tree _after_ the
# cron daemon has been paused (assuming we're not performing a dry run)
# so we know for sure that we see all running cron jobs (also we're not
# interested in any processes that have already been stopped by
# cron-graceful-additions).
cron_daemon = find_cron_daemon()
cron_jobs = sorted_by_pid(cron_daemon.grandchildren)
if cron_jobs:
logger.info("Found %s: %s",
pluralize(len(cron_jobs), "running cron job"),
concatenate(str(j.pid) for j in cron_jobs))
# Wait for the running cron jobs to finish.
wait_for_processes(cron_jobs)
else:
logger.info("No running cron jobs found.")
# Terminate the cron daemon.
if dry_run:
logger.info("Stopping cron daemon with process id %i ..", cron_daemon.pid)
else:
terminate_cron_daemon(cron_daemon)
logger.info("Done! Took %s to gracefully terminate cron.", runtime_timer.rounded)
def parse_arguments(arguments):
"""
Parse the command line arguments.
:param arguments: A list of strings with command line arguments.
:returns: ``True`` if a dry run was requested, ``False`` otherwise.
"""
dry_run = False
try:
options, arguments = getopt.gnu_getopt(arguments, 'nvqh', [
'dry-run', 'verbose', 'quiet', 'help'
])
for option, value in options:
if option in ('-n', '--dry-run'):
dry_run = True
elif option in ('-v', '--verbose'):
coloredlogs.increase_verbosity()
elif option in ('-q', '--quiet'):
coloredlogs.decrease_verbosity()
elif option in ('-h', '--help'):
usage(USAGE_TEXT)
sys.exit(0)
else:
assert False, "Unhandled option!"
return dry_run
except Exception as e:
warning("Error: Failed to parse command line arguments! (%s)", e)
sys.exit(1)
def ensure_root_privileges():
"""
Make sure we have root privileges.
"""
if os.getuid() != 0:
warning("Error: Please run this command as root!")
sys.exit(1)
def find_cron_daemon():
"""
Find the cron daemon process.
:returns: A :class:`~proc.tree.ProcessNode` object.
:raises: :exc:`CronDaemonNotRunning` when the cron daemon process cannot
be located.
"""
init = get_process_tree()
cron = init.find(exe_name='cron')
if not cron:
raise CronDaemonNotRunning("Failed to determine process id of cron daemon process! Is it running?")
return cron
def run_additions():
"""
Allow local additions to the behavior of ``cron-graceful``.
If a command with the name of :data:`ADDITIONS_SCRIPT_NAME` exists in the
``$PATH`` it will be executed directly after the cron daemon is paused by
:func:`cron_graceful()`. This allows you to inject custom logic into the
graceful shutdown process. If the command fails a warning will be logged
but the ``cron-graceful`` program will continue.
"""
matching_programs = which(ADDITIONS_SCRIPT_NAME)
if matching_programs:
logger.info("Running command %s ..", matching_programs[0])
try:
execute(matching_programs[0], shell=False)
except ExternalCommandFailed as e:
logger.warning("Command failed with exit status %i!", e.returncode)
def wait_for_processes(processes):
"""
Wait for the given processes to end.
Prints an overview of running processes to the terminal once a second so
the user knows what they are waiting for.
This function is not specific to :mod:`proc.cron` at all (it doesn't
even need to know what cron jobs are); it simply waits until all of the given
processes have ended.
:param processes: A list of :class:`~proc.tree.ProcessNode` objects.
"""
wait_timer = Timer()
running_processes = list(processes)
for process in running_processes:
logger.info("Waiting for process %i: %s (runtime is %s)",
process.pid, quote(process.cmdline), format_timespan(round(process.runtime)))
with Spinner(timer=wait_timer) as spinner:
while True:
for process in list(running_processes):
if not process.is_alive:
running_processes.remove(process)
if not running_processes:
break
num_processes = pluralize(len(running_processes), "process", "processes")
process_ids = concatenate(str(p.pid) for p in running_processes)
spinner.step(label="Waiting for %s: %s" % (num_processes, process_ids))
spinner.sleep()
logger.info("All processes have finished, we're done waiting (took %s).", wait_timer.rounded)
def terminate_cron_daemon(cron_daemon):
"""
Terminate the cron daemon.
:param cron_daemon: The :class:`~proc.tree.ProcessNode` of the cron
daemon process.
"""
# We'll first try to terminate the cron daemon using whatever daemon
# supervision system is in place (e.g. upstart or systemd) instead of
# simply killing the cron process, as a signal that we don't want the cron
# daemon to be restarted.
logger.info("Stopping cron daemon (service cron stop) ..")
if not execute('service', 'cron', 'stop', check=False):
logger.warning("The 'service cron stop' command reported an error!")
# If the service command failed to terminate the cron daemon we will
# terminate cron explicitly, in the assumption that we're dealing with a
# naive /etc/init.d/cron script that doesn't use SIGKILL when SIGTERM fails
# (due to our earlier SIGSTOP).
if cron_daemon.is_alive:
cron_daemon.kill()
class CronDaemonNotRunning(Exception):
"""Exception raised by :func:`find_cron_daemon()` when it cannot locate the cron daemon process."""
|
|
import asyncio
import time
import traceback
from sortedcontainers import SortedDict
from . import platform
from . import helper
if platform.is_raspberry():
from adafruit import pca9685
from adafruit import wirebus
else:
from .dummy import pca9685
from .dummy import wirebus
def parse_config(config, loop=None, logger=None):
# {
# "drivers": [<Driver>],
# "regions": {
# "LEFT_HAND": [<Actor>]
# }
# }
def driver_for_address(drivers, address, i2c_bus_number):
if address not in drivers:
if not wirebus.I2C.isDeviceAnswering(address, i2c_bus_number):
return None
driver = pca9685.Driver(address, i2c_bus_number, logger=logger)
drivers[address] = driver
return drivers[address]
loop = loop if loop is not None else asyncio.get_event_loop()
vibration_config = config['vibration']
global_actor_mapping_curve_degree = vibration_config.get('actor_mapping_curve_degree', None)
global_actor_min_intensity = vibration_config.get('actor_min_intensity', None)
global_actor_min_intensity_warmup = vibration_config.get('actor_min_intensity_warmup', None)
global_actor_min_instant_intensity = vibration_config.get('actor_min_instant_intensity', None)
drivers = {} # driver_address -> driver
regions = {} # region_name -> actor_index -> actor
for region_config in vibration_config['regions']:
driver_address = region_config['driver_address']
if type(driver_address) is str:
driver_address = int(driver_address, 16) if driver_address.startswith('0x') else int(driver_address)
driver = driver_for_address(drivers, driver_address, region_config['i2c_bus_number'])
if driver is None:
if logger is not None:
logger.error("No driver found for at address 0x%02X on I2C bus %d for region %s - ignoring region", dirver_address, region_config['i2c_bus_number'], region_config['name'])
continue
if region_config['name'] not in regions:
regions[region_config['name']] = {}
region_actor_mapping_curve_degree = region_config.get('actor_mapping_curve_degree', global_actor_mapping_curve_degree)
region_actor_min_intensity = region_config.get('actor_min_intensity', global_actor_min_intensity)
region_actor_min_intensity_warmup = region_config.get('actor_min_intensity_warmup', global_actor_min_intensity_warmup)
region_actor_min_instant_intensity = region_config.get('actor_min_instant_intensity', global_actor_min_instant_intensity)
region_actors = regions[region_config['name']]
for actor_config in region_config['actors']:
if actor_config['index'] in region_actors:
if logger is not None:
logger.error("Multiple actors configured with index %d in region %s - ignoring subsequent definitions", actor_config['index'], region_config['name'])
continue
else:
vibration_motor = VibrationMotor(driver=driver, outlet=actor_config['outlet'], index_in_region=actor_config['index'], position=actor_config['position'], loop=loop, logger=logger)
mapping_curve_degree = actor_config.get('mapping_curve_degree', region_actor_mapping_curve_degree)
min_intensity = actor_config.get('min_intensity', region_actor_min_intensity)
min_intensity_warmup = actor_config.get('min_intensity_warmup', region_actor_min_intensity_warmup)
min_instant_intensity = actor_config.get('min_instant_intensity', region_actor_min_instant_intensity)
if mapping_curve_degree is not None:
vibration_motor.mapping_curve_degree = mapping_curve_degree
if min_intensity is not None:
vibration_motor.min_intensity = min_intensity
if min_intensity_warmup is not None:
vibration_motor.min_intensity_warmup = min_intensity_warmup
if min_instant_intensity is not None:
vibration_motor.min_instant_intensity = min_instant_intensity
region_actors[actor_config['index']] = vibration_motor
for region_name in regions:
regions[region_name] = list(regions[region_name].values())
return { "drivers": list(drivers.values()), "regions": regions }
class PrioritizedIntensity(object):
_MIN_VALUE = 0.005
def __init__(self):
self._values = SortedDict()
def set(self, value, priority=100):
value = float(value)
if value < self._MIN_VALUE and priority in self._values:
del self._values[priority]
else:
self._values[priority] = value
def eval(self):
if not self._values:
return 0.0
return self._values[self._values.keys()[-1]]
def top_priority(self):
if not self._values:
return 0
return self._values.keys()[len(self._values) - 1]
def reset(self):
self._values.clear()
class VibrationMotor(object):
_SENSITIVITY = 0.005  # ignore any changes below this value and treat values below it as "motor off"
def __init__(self, driver, outlet, index_in_region, position=None, loop=None, logger=None):
self._loop = loop if loop is not None else asyncio.get_event_loop()
self.driver = driver
self.outlet = outlet
self.index_in_region = index_in_region
self.position = position
self.logger = logger
self.profiler = None
self.mapping_curve_degree = 1.5 # degree of the function used to map intensity values from [0, 1] to the supported motor range. Use '2' for square, '3' for cubic and so on. No matter which degree, it is ensured an intensity of 0 is always off and an intensity of 1 always equals full motor intensity. Only supports positive values.
self.min_intensity = 0.3  # minimum intensity at which the motor will keep running (possibly after being started at a higher intensity)
self.min_instant_intensity = 0.5 # minimum intensity that can be applied to the motor directly
self.min_intensity_warmup = 0.2  # how long the motor needs to run at min_instant_intensity before it is okay to switch down to min_intensity
self._intensity = PrioritizedIntensity()
self._target_intensity = self._intensity.eval()
self.__current_intensity = 0
self._running_since = None
def _profile(self, action, *args):
if self.profiler is not None:
self.profiler.log(action, *args)
def _map_intensity(self, intensity):
return self.min_intensity + (1 - self.min_intensity) * intensity ** self.mapping_curve_degree
def _running_time(self):
if self._running_since is None:
return 0
else:
return time.time() - self._running_since
def _can_set_directly(self, intensity):
if intensity < self._SENSITIVITY: # turn off
return True
if intensity >= self.min_instant_intensity: # intense enough to start instantly
return True
if self._running_time() > self.min_intensity_warmup: # running long enough
return True
return False
@property
def _current_intensity(self):
return self.__current_intensity
@_current_intensity.setter
def _current_intensity(self, value):
if abs(value - self.__current_intensity) < self._SENSITIVITY:
return
if self.logger is not None:
self.logger.debug("setting %s to %.3f", self.position, value)
self.__current_intensity = value
self._profile("set_pwm", self.index_in_region, self.__current_intensity)
self.driver.setPWM(self.outlet, 0, self.__current_intensity)
if value < self._SENSITIVITY:
self._running_since = None
elif self._running_since is None:
self._running_since = time.time()
def intensity(self):
return self._intensity.eval()
@asyncio.coroutine
def set_intensity(self, intensity, priority=100):
intensity = float(intensity)
if (intensity < 0 or intensity > 1) and self.logger:
self.logger.warning('clamping intensity - not in interval [0, 1]: %s' % intensity)
intensity = max(min(intensity, 1), 0)
if __debug__ and self.logger is not None:
self.logger.warning("".join(traceback.format_stack()))
self._intensity.set(intensity, priority)
if self._intensity.eval() < self._SENSITIVITY:
self._target_intensity = 0
else:
self._target_intensity = self._map_intensity(self._intensity.eval())
if self._can_set_directly(self._target_intensity):
self._profile("set_intensity", self.index_in_region, intensity, priority, self._target_intensity, 'direct')
self._current_intensity = self._target_intensity
future = asyncio.Future()
future.set_result(self._target_intensity)
return future
else:
self._profile("set_intensity", self.index_in_region, intensity, priority, self._target_intensity, 'delayed')
return helper.create_exception_reporting_task(self.set_intensity_delayed(), loop=self._loop, logger=self.logger)
@asyncio.coroutine
def set_intensity_delayed(self):
if self._current_intensity < self.min_intensity:
self._current_intensity = self.min_instant_intensity
delay = self.min_intensity_warmup - self._running_time()
yield from asyncio.sleep(delay)
self._current_intensity = self._target_intensity
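# Illustrative sketch (added for clarity, not part of the original module): how
# PrioritizedIntensity arbitrates between overlapping intensity requests. eval() returns the
# value registered under the highest priority; clearing that priority (a value below
# _MIN_VALUE) falls back to the next lower priority.
def _example_prioritized_intensity():
    spec = PrioritizedIntensity()
    spec.set(0.2, priority=10)    # background rumble
    spec.set(0.8, priority=100)   # momentary event wins
    assert spec.eval() == 0.8
    spec.set(0.0, priority=100)   # event over - drops back to the background value
    assert spec.eval() == 0.2
    spec.reset()
    assert spec.eval() == 0.0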
|
|
# -*- coding: utf-8 -*-
"""
sphinx.util
~~~~~~~~~~~
Utility functions for Sphinx.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import shutil
import fnmatch
import tempfile
import posixpath
import traceback
import unicodedata
from os import path
from codecs import open, BOM_UTF8
from collections import deque
import docutils
from docutils.utils import relative_path
import jinja2
import sphinx
from sphinx.errors import PycodeError
from sphinx.util.pycompat import bytes
# import other utilities; partly for backwards compatibility, so don't
# prune unused ones indiscriminately
from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, walk, \
mtimes_of_files, movefile, copyfile, copytimes, make_filename, ustrftime
from sphinx.util.nodes import nested_parse_with_titles, split_explicit_title, \
explicit_title_re, caption_ref_re
from sphinx.util.matching import patfilter
# Generally useful regular expressions.
ws_re = re.compile(r'\s+')
url_re = re.compile(r'(?P<schema>.+)://.*')
# High-level utility functions.
def docname_join(basedocname, docname):
return posixpath.normpath(
posixpath.join('/' + basedocname, '..', docname))[1:]
def path_stabilize(filepath):
"normalize path separater and unicode string"
newpath = filepath.replace(os.path.sep, SEP)
if isinstance(newpath, unicode):
newpath = unicodedata.normalize('NFC', newpath)
return newpath
def get_matching_files(dirname, exclude_matchers=()):
"""Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
"""
# dirname is a normalized absolute path.
dirname = path.normpath(path.abspath(dirname))
dirlen = len(dirname) + 1 # exclude final os.path.sep
for root, dirs, files in walk(dirname, followlinks=True):
relativeroot = root[dirlen:]
qdirs = enumerate(path_stabilize(path.join(relativeroot, dn))
for dn in dirs)
qfiles = enumerate(path_stabilize(path.join(relativeroot, fn))
for fn in files)
for matcher in exclude_matchers:
qdirs = [entry for entry in qdirs if not matcher(entry[1])]
qfiles = [entry for entry in qfiles if not matcher(entry[1])]
dirs[:] = sorted(dirs[i] for (i, _) in qdirs)
for i, filename in sorted(qfiles):
yield filename
def get_matching_docs(dirname, suffix, exclude_matchers=()):
"""Get all file names (without suffix) matching a suffix in a directory,
recursively.
Exclude files and dirs matching a pattern in *exclude_patterns*.
"""
suffixpattern = '*' + suffix
for filename in get_matching_files(dirname, exclude_matchers):
if not fnmatch.fnmatch(filename, suffixpattern):
continue
yield filename[:-len(suffix)]
class FilenameUniqDict(dict):
"""
A dictionary that automatically generates unique names for its keys,
interpreted as filenames, and keeps track of a set of docnames they
appear in. Used for images and downloadable files in the environment.
"""
def __init__(self):
self._existing = set()
def add_file(self, docname, newfile):
if newfile in self:
self[newfile][0].add(docname)
return self[newfile][1]
uniquename = path.basename(newfile)
base, ext = path.splitext(uniquename)
i = 0
while uniquename in self._existing:
i += 1
uniquename = '%s%s%s' % (base, i, ext)
self[newfile] = (set([docname]), uniquename)
self._existing.add(uniquename)
return uniquename
def purge_doc(self, docname):
for filename, (docs, unique) in self.items():
docs.discard(docname)
if not docs:
del self[filename]
self._existing.discard(unique)
def __getstate__(self):
return self._existing
def __setstate__(self, state):
self._existing = state
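# Illustrative sketch (added for clarity, not part of sphinx.util): FilenameUniqDict hands out a
# unique basename per file and remembers which documents reference it, so purge_doc() can drop
# entries that are no longer used anywhere. The docnames and paths below are made-up examples.
def _example_filename_uniq_dict():
    images = FilenameUniqDict()
    name_a = images.add_file('docA', 'imgs/logo.png')    # -> 'logo.png'
    name_b = images.add_file('docB', 'other/logo.png')   # -> 'logo1.png' (name clash avoided)
    images.add_file('docB', 'imgs/logo.png')             # docB now also uses the first file
    images.purge_doc('docA')                             # first file kept because docB still uses it
    return name_a, name_b, sorted(images)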
def copy_static_entry(source, targetdir, builder, context={},
exclude_matchers=(), level=0):
"""Copy a HTML builder static_path entry from source to targetdir.
Handles all possible cases of files, directories and subdirectories.
"""
if exclude_matchers:
relpath = relative_path(path.join(builder.srcdir, 'dummy'), source)
for matcher in exclude_matchers:
if matcher(relpath):
return
if path.isfile(source):
target = path.join(targetdir, path.basename(source))
if source.lower().endswith('_t') and builder.templates:
# templated!
fsrc = open(source, 'r', encoding='utf-8')
fdst = open(target[:-2], 'w', encoding='utf-8')
fdst.write(builder.templates.render_string(fsrc.read(), context))
fsrc.close()
fdst.close()
else:
copyfile(source, target)
elif path.isdir(source):
if not path.isdir(targetdir):
os.mkdir(targetdir)
for entry in os.listdir(source):
if entry.startswith('.'):
continue
newtarget = targetdir
if path.isdir(path.join(source, entry)):
newtarget = path.join(targetdir, entry)
copy_static_entry(path.join(source, entry), newtarget,
builder, context, level=level+1,
exclude_matchers=exclude_matchers)
_DEBUG_HEADER = '''\
# Sphinx version: %s
# Python version: %s
# Docutils version: %s %s
# Jinja2 version: %s
# Loaded extensions:
'''
def save_traceback(app):
"""Save the current exception's traceback in a temporary file."""
import platform
exc = traceback.format_exc()
fd, path = tempfile.mkstemp('.log', 'sphinx-err-')
os.write(fd, (_DEBUG_HEADER %
(sphinx.__version__,
platform.python_version(),
docutils.__version__, docutils.__version_details__,
jinja2.__version__)).encode('utf-8'))
if app is not None:
for extname, extmod in app._extensions.iteritems():
os.write(fd, ('# %s from %s\n' % (
extname, getattr(extmod, '__file__', 'unknown'))
).encode('utf-8'))
os.write(fd, exc.encode('utf-8'))
os.close(fd)
return path
def get_module_source(modname):
"""Try to find the source code for a module.
Can return ('file', 'filename') in which case the source is in the given
file, or ('string', 'source') in which case the source is the string.
"""
if modname not in sys.modules:
try:
__import__(modname)
except Exception, err:
raise PycodeError('error importing %r' % modname, err)
mod = sys.modules[modname]
filename = getattr(mod, '__file__', None)
loader = getattr(mod, '__loader__', None)
if loader and getattr(loader, 'get_filename', None):
try:
filename = loader.get_filename(modname)
except Exception, err:
raise PycodeError('error getting filename for %r' % filename, err)
if filename is None and loader:
try:
return 'string', loader.get_source(modname)
except Exception, err:
raise PycodeError('error getting source for %r' % modname, err)
if filename is None:
raise PycodeError('no source found for module %r' % modname)
filename = path.normpath(path.abspath(filename))
lfilename = filename.lower()
if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'):
filename = filename[:-1]
if not path.isfile(filename) and path.isfile(filename + 'w'):
filename += 'w'
elif not (lfilename.endswith('.py') or lfilename.endswith('.pyw')):
raise PycodeError('source is not a .py file: %r' % filename)
if not path.isfile(filename):
raise PycodeError('source file is not present: %r' % filename)
return 'file', filename
# a regex to recognize coding cookies
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_encoding(readline):
"""Like tokenize.detect_encoding() from Py3k, but a bit simplified."""
def read_or_stop():
try:
return readline()
except StopIteration:
return None
def get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace('_', '-')
if enc == 'utf-8' or enc.startswith('utf-8-'):
return 'utf-8'
if enc in ('latin-1', 'iso-8859-1', 'iso-latin-1') or \
enc.startswith(('latin-1-', 'iso-8859-1-', 'iso-latin-1-')):
return 'iso-8859-1'
return orig_enc
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = _coding_re.findall(line_string)
if not matches:
return None
return get_normal_name(matches[0])
default = sys.getdefaultencoding()
first = read_or_stop()
if first and first.startswith(BOM_UTF8):
first = first[3:]
default = 'utf-8-sig'
if not first:
return default
encoding = find_cookie(first)
if encoding:
return encoding
second = read_or_stop()
if not second:
return default
encoding = find_cookie(second)
if encoding:
return encoding
return default
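# Illustrative sketch (added for clarity, not part of sphinx.util): detect_encoding() expects a
# readline callable yielding byte lines, mirroring tokenize.detect_encoding(). Example with an
# in-memory file carrying a coding cookie:
def _example_detect_encoding():
    from io import BytesIO
    source = BytesIO(b'# -*- coding: iso-8859-1 -*-\nx = 1\n')
    return detect_encoding(source.readline)   # -> 'iso-8859-1'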
# Low-level utility functions and classes.
class Tee(object):
"""
File-like object writing to two streams.
"""
def __init__(self, stream1, stream2):
self.stream1 = stream1
self.stream2 = stream2
def write(self, text):
self.stream1.write(text)
self.stream2.write(text)
def flush(self):
if hasattr(self.stream1, 'flush'):
self.stream1.flush()
if hasattr(self.stream2, 'flush'):
self.stream2.flush()
def parselinenos(spec, total):
"""Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
items = list()
parts = spec.split(',')
for part in parts:
try:
begend = part.strip().split('-')
if len(begend) > 2:
raise ValueError
if len(begend) == 1:
items.append(int(begend[0])-1)
else:
start = (begend[0] == '') and 0 or int(begend[0])-1
end = (begend[1] == '') and total or int(begend[1])
items.extend(xrange(start, end))
except Exception:
raise ValueError('invalid line number spec: %r' % spec)
return items
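# Illustrative sketch (added for clarity, not part of sphinx.util): parselinenos() turns a
# 1-based spec into 0-based line indices; open-ended ranges use the *total* argument.
def _example_parselinenos():
    assert parselinenos('1,2,4-6', 10) == [0, 1, 3, 4, 5]
    assert parselinenos('-3', 10) == [0, 1, 2]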
def force_decode(string, encoding):
"""Forcibly get a unicode string out of a bytestring."""
if isinstance(string, bytes):
try:
if encoding:
string = string.decode(encoding)
else:
# try decoding with utf-8, should only work for real UTF-8
string = string.decode('utf-8')
except UnicodeError:
# last resort -- can't fail
string = string.decode('latin1')
return string
class attrdict(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, val):
self[key] = val
def __delattr__(self, key):
del self[key]
def rpartition(s, t):
"""Similar to str.rpartition from 2.5, but doesn't return the separator."""
i = s.rfind(t)
if i != -1:
return s[:i], s[i+len(t):]
return '', s
def split_into(n, type, value):
"""Split an index entry into a given number of parts at semicolons."""
parts = map(lambda x: x.strip(), value.split(';', n-1))
if sum(1 for part in parts if part) < n:
raise ValueError('invalid %s index entry %r' % (type, value))
return parts
def split_index_msg(type, value):
# new entry types must be listed in directives/other.py!
result = []
try:
if type == 'single':
try:
result = split_into(2, 'single', value)
except ValueError:
result = split_into(1, 'single', value)
elif type == 'pair':
result = split_into(2, 'pair', value)
elif type == 'triple':
result = split_into(3, 'triple', value)
elif type == 'see':
result = split_into(2, 'see', value)
elif type == 'seealso':
result = split_into(2, 'see', value)
except ValueError:
pass
return result
def format_exception_cut_frames(x=1):
"""Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
#res = ['Traceback (most recent call last):\n']
res = []
tbres = traceback.format_tb(tb)
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
return ''.join(res)
class PeekableIterator(object):
"""
An iterator which wraps any iterable and makes it possible to peek ahead
at the next item.
"""
def __init__(self, iterable):
self.remaining = deque()
self._iterator = iter(iterable)
def __iter__(self):
return self
def next(self):
"""Return the next item from the iterator."""
if self.remaining:
return self.remaining.popleft()
return self._iterator.next()
def push(self, item):
"""Push the `item` on the internal stack, it will be returned on the
next :meth:`next` call.
"""
self.remaining.append(item)
def peek(self):
"""Return the next item without changing the state of the iterator."""
item = self.next()
self.push(item)
return item
|
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import hashlib
import logging
from dataclasses import dataclass
from enum import Enum
from textwrap import dedent
from typing import Iterable, Mapping
from pants.engine.collection import DeduplicatedCollection
from pants.engine.engine_aware import EngineAwareReturnType, SideEffecting
from pants.engine.fs import EMPTY_DIGEST, CreateDigest, Digest, FileContent, FileDigest
from pants.engine.internals.selectors import MultiGet
from pants.engine.platform import Platform
from pants.engine.rules import Get, collect_rules, rule
from pants.option.global_options import GlobalOptions
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import OrderedSet
from pants.util.strutil import create_path_env_var, pluralize
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ProductDescription:
value: str
class ProcessCacheScope(Enum):
# Cached in all locations, regardless of success or failure.
ALWAYS = "always"
# Cached in all locations, but only if the process exits successfully.
SUCCESSFUL = "successful"
# Cached only in memory (i.e. memoized in pantsd), but never persistently, regardless of
# success vs. failure.
PER_RESTART_ALWAYS = "per_restart_always"
# Cached only in memory (i.e. memoized in pantsd), but never persistently, and only if
# successful.
PER_RESTART_SUCCESSFUL = "per_restart_successful"
# Will run once per Session, i.e. once per run of Pants. This happens because the engine
# de-duplicates identical work; the process is neither memoized in memory nor cached to disk.
PER_SESSION = "per_session"
@frozen_after_init
@dataclass(unsafe_hash=True)
class Process:
argv: tuple[str, ...]
description: str = dataclasses.field(compare=False)
level: LogLevel
input_digest: Digest
working_directory: str | None
env: FrozenDict[str, str]
append_only_caches: FrozenDict[str, str]
output_files: tuple[str, ...]
output_directories: tuple[str, ...]
timeout_seconds: int | float
jdk_home: str | None
use_nailgun: Digest
execution_slot_variable: str | None
cache_scope: ProcessCacheScope
def __init__(
self,
argv: Iterable[str],
*,
description: str,
level: LogLevel = LogLevel.INFO,
input_digest: Digest = EMPTY_DIGEST,
working_directory: str | None = None,
env: Mapping[str, str] | None = None,
append_only_caches: Mapping[str, str] | None = None,
output_files: Iterable[str] | None = None,
output_directories: Iterable[str] | None = None,
timeout_seconds: int | float | None = None,
jdk_home: str | None = None,
use_nailgun: Digest = EMPTY_DIGEST,
execution_slot_variable: str | None = None,
cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,
) -> None:
"""Request to run a subprocess, similar to subprocess.Popen.
This process will be hermetic, meaning that it cannot access files and environment variables
that are not explicitly populated. For example, $PATH will not be defined by default, unless
populated through the `env` parameter.
Usually, you will want to provide input files/directories via the parameter `input_digest`.
The process will then be able to access these paths through relative paths. If you want to
give multiple input digests, first merge them with `await Get(Digest, MergeDigests)`.
Often, you will want to capture the files/directories created in the process. To do this,
you can either set `output_files` or `output_directories`. The specified paths should be
specified relative to the `working_directory`, if any, and will then be used to populate
`output_digest` on the `ProcessResult`. If you want to split up this output digest into
multiple digests, use `await Get(Digest, DigestSubset)` on the `output_digest`.
To actually run the process, use `await Get(ProcessResult, Process)` or
`await Get(FallibleProcessResult, Process)`.
Example:
result = await Get(
ProcessResult, Process(["/bin/echo", "hello world"], description="demo")
)
assert result.stdout == b"hello world"
"""
if isinstance(argv, str):
raise ValueError("argv must be a sequence of strings, but was a single string.")
self.argv = tuple(argv)
self.description = description
self.level = level
self.input_digest = input_digest
self.working_directory = working_directory
self.env = FrozenDict(env or {})
self.append_only_caches = FrozenDict(append_only_caches or {})
self.output_files = tuple(output_files or ())
self.output_directories = tuple(output_directories or ())
# NB: A negative or None time value is normalized to -1 to ease the transfer to Rust.
self.timeout_seconds = timeout_seconds if timeout_seconds and timeout_seconds > 0 else -1
self.jdk_home = jdk_home
self.use_nailgun = use_nailgun
self.execution_slot_variable = execution_slot_variable
self.cache_scope = cache_scope
@frozen_after_init
@dataclass(unsafe_hash=True)
class MultiPlatformProcess:
platform_constraints: tuple[str | None, ...]
processes: tuple[Process, ...]
def __init__(self, request_dict: dict[Platform | None, Process]) -> None:
if len(request_dict) == 0:
raise ValueError("At least one platform-constrained Process must be passed.")
serialized_constraints = tuple(
constraint.value if constraint else None for constraint in request_dict
)
if len({req.description for req in request_dict.values()}) != 1:
raise ValueError(
f"The `description` of all processes in a {MultiPlatformProcess.__name__} must "
f"be identical, but got: {list(request_dict.values())}."
)
self.platform_constraints = serialized_constraints
self.processes = tuple(request_dict.values())
@property
def product_description(self) -> ProductDescription:
return ProductDescription(self.processes[0].description)
@dataclass(frozen=True)
class ProcessResult:
"""Result of executing a process which should not fail.
If the process has a non-zero exit code, this will raise an exception, unlike
FallibleProcessResult.
"""
stdout: bytes
stdout_digest: FileDigest
stderr: bytes
stderr_digest: FileDigest
output_digest: Digest
@dataclass(frozen=True)
class FallibleProcessResult:
"""Result of executing a process which might fail.
If the process has a non-zero exit code, this will not raise an exception, unlike ProcessResult.
"""
stdout: bytes
stdout_digest: FileDigest
stderr: bytes
stderr_digest: FileDigest
exit_code: int
output_digest: Digest
@dataclass(frozen=True)
class FallibleProcessResultWithPlatform:
"""Result of executing a process which might fail, along with the platform it ran on."""
stdout: bytes
stdout_digest: FileDigest
stderr: bytes
stderr_digest: FileDigest
exit_code: int
output_digest: Digest
platform: Platform
class ProcessExecutionFailure(Exception):
"""Used to denote that a process exited, but was unsuccessful in some way.
For example, exiting with a non-zero code.
"""
def __init__(
self,
exit_code: int,
stdout: bytes,
stderr: bytes,
process_description: str,
*,
local_cleanup: bool,
) -> None:
# These are intentionally "public" members.
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
# NB: We don't use dedent on a single format string here because it would attempt to
# interpret the stdio content.
err_strings = [
f"Process '{process_description}' failed with exit code {exit_code}.",
"stdout:",
stdout.decode(),
"stderr:",
stderr.decode(),
]
if local_cleanup:
err_strings.append(
"\n\n"
"Use --no-process-execution-local-cleanup to preserve process chroots "
"for inspection."
)
super().__init__("\n".join(err_strings))
@rule
def get_multi_platform_request_description(req: MultiPlatformProcess) -> ProductDescription:
return req.product_description
@rule
def upcast_process(req: Process) -> MultiPlatformProcess:
"""This rule allows an Process to be run as a platform compatible MultiPlatformProcess."""
return MultiPlatformProcess({None: req})
@rule
def fallible_to_exec_result_or_raise(
fallible_result: FallibleProcessResult,
description: ProductDescription,
global_options: GlobalOptions,
) -> ProcessResult:
"""Converts a FallibleProcessResult to a ProcessResult or raises an error."""
if fallible_result.exit_code == 0:
return ProcessResult(
stdout=fallible_result.stdout,
stdout_digest=fallible_result.stdout_digest,
stderr=fallible_result.stderr,
stderr_digest=fallible_result.stderr_digest,
output_digest=fallible_result.output_digest,
)
raise ProcessExecutionFailure(
fallible_result.exit_code,
fallible_result.stdout,
fallible_result.stderr,
description.value,
local_cleanup=global_options.options.process_execution_local_cleanup,
)
@rule
def remove_platform_information(res: FallibleProcessResultWithPlatform) -> FallibleProcessResult:
return FallibleProcessResult(
exit_code=res.exit_code,
stdout=res.stdout,
stdout_digest=res.stdout_digest,
stderr=res.stderr,
stderr_digest=res.stderr_digest,
output_digest=res.output_digest,
)
@dataclass(frozen=True)
class InteractiveProcessResult:
exit_code: int
@frozen_after_init
@dataclass(unsafe_hash=True)
class InteractiveProcess(SideEffecting):
argv: tuple[str, ...]
env: FrozenDict[str, str]
input_digest: Digest
run_in_workspace: bool
forward_signals_to_process: bool
restartable: bool
def __init__(
self,
argv: Iterable[str],
*,
env: Mapping[str, str] | None = None,
input_digest: Digest = EMPTY_DIGEST,
run_in_workspace: bool = False,
forward_signals_to_process: bool = True,
restartable: bool = False,
) -> None:
"""Request to run a subprocess in the foreground, similar to subprocess.run().
Unlike `Process`, the result will not be cached.
To run the process, use `await Effect(InteractiveProcessResult, InteractiveProcess(..))`
in a `@goal_rule`.
`forward_signals_to_process` controls whether pants will allow a SIGINT signal
sent to a process by hitting Ctrl-C in the terminal to actually reach the process,
or capture that signal itself, blocking it from the process.
"""
self.argv = tuple(argv)
self.env = FrozenDict(env or {})
self.input_digest = input_digest
self.run_in_workspace = run_in_workspace
self.forward_signals_to_process = forward_signals_to_process
self.restartable = restartable
self.__post_init__()
def __post_init__(self):
if self.input_digest != EMPTY_DIGEST and self.run_in_workspace:
            raise ValueError(
                "InteractiveProcess should use the Workspace API to materialize any needed "
                "files when it runs in the workspace"
            )
@classmethod
def from_process(
cls,
process: Process,
*,
forward_signals_to_process: bool = True,
restartable: bool = False,
) -> InteractiveProcess:
return InteractiveProcess(
argv=process.argv,
env=process.env,
input_digest=process.input_digest,
forward_signals_to_process=forward_signals_to_process,
restartable=restartable,
)
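# Illustrative sketch of requesting an interactive process from a @goal_rule; the argv
# below is an assumption for demonstration, and the Effect call needs an engine context.
#
#     proc = InteractiveProcess(argv=["/bin/bash"], run_in_workspace=True)
#     # result = await Effect(InteractiveProcessResult, proc)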
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPathTest:
args: tuple[str, ...]
fingerprint_stdout: bool
def __init__(self, args: Iterable[str], fingerprint_stdout: bool = True) -> None:
self.args = tuple(args)
self.fingerprint_stdout = fingerprint_stdout
class SearchPath(DeduplicatedCollection[str]):
"""The search path for binaries; i.e.: the $PATH."""
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPathRequest:
"""Request to find a binary of a given name.
If a `test` is specified all binaries that are found will be executed with the test args and
only those binaries whose test executions exit with return code 0 will be retained.
Additionally, if test execution includes stdout content, that will be used to fingerprint the
binary path so that upgrades and downgrades can be detected. A reasonable test for many programs
might be `BinaryPathTest(args=["--version"])` since it will both ensure the program runs and
also produce stdout text that changes upon upgrade or downgrade of the binary at the discovered
path.
"""
search_path: SearchPath
binary_name: str
test: BinaryPathTest | None
def __init__(
self,
*,
search_path: Iterable[str],
binary_name: str,
test: BinaryPathTest | None = None,
) -> None:
self.search_path = SearchPath(search_path)
self.binary_name = binary_name
self.test = test
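# Illustrative sketch of requesting a binary lookup; the binary name and search path are
# assumptions chosen for demonstration (see `find_bash` below for an in-repo usage).
#
#     request = BinaryPathRequest(
#         binary_name="python3",
#         search_path=("/usr/bin", "/usr/local/bin"),
#         test=BinaryPathTest(args=["--version"]),
#     )
#     # paths = await Get(BinaryPaths, BinaryPathRequest, request)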
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPath:
path: str
fingerprint: str
def __init__(self, path: str, fingerprint: str | None = None) -> None:
self.path = path
self.fingerprint = self._fingerprint() if fingerprint is None else fingerprint
@staticmethod
def _fingerprint(content: bytes | bytearray | memoryview | None = None) -> str:
hasher = hashlib.sha256() if content is None else hashlib.sha256(content)
return hasher.hexdigest()
@classmethod
def fingerprinted(
cls, path: str, representative_content: bytes | bytearray | memoryview
) -> BinaryPath:
return cls(path, fingerprint=cls._fingerprint(representative_content))
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPaths(EngineAwareReturnType):
binary_name: str
paths: tuple[BinaryPath, ...]
def __init__(self, binary_name: str, paths: Iterable[BinaryPath] | None = None):
self.binary_name = binary_name
self.paths = tuple(OrderedSet(paths) if paths else ())
def message(self) -> str:
if not self.paths:
return f"failed to find {self.binary_name}"
found_msg = f"found {self.binary_name} at {self.paths[0]}"
if len(self.paths) > 1:
found_msg = f"{found_msg} and {pluralize(len(self.paths) - 1, 'other location')}"
return found_msg
@property
def first_path(self) -> BinaryPath | None:
"""Return the first path to the binary that was discovered, if any."""
return next(iter(self.paths), None)
class BinaryNotFoundError(EnvironmentError):
@classmethod
def from_request(
cls,
request: BinaryPathRequest,
*,
rationale: str | None = None,
alternative_solution: str | None = None,
) -> BinaryNotFoundError:
"""When no binary is found via `BinaryPaths`, and it is not recoverable.
:param rationale: A short description of why this binary is needed, e.g.
"download the tools Pants needs" or "run Python programs".
:param alternative_solution: A description of what else users can do to fix the issue,
beyond installing the program. For example, "Alternatively, you can set the option
`--python-setup-interpreter-search-path` to change the paths searched."
"""
msg = (
f"Cannot find `{request.binary_name}` on `{sorted(request.search_path)}`. Please "
"ensure that it is installed"
)
msg += f" so that Pants can {rationale}." if rationale else "."
if alternative_solution:
msg += f"\n\n{alternative_solution}"
return BinaryNotFoundError(msg)
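# Illustrative sketch of the richer error message; the rationale and alternative text
# below are assumptions for demonstration.
#
#     raise BinaryNotFoundError.from_request(
#         request,
#         rationale="run interactive shells",
#         alternative_solution="Alternatively, install bash or adjust the search path option.",
#     )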
class BashBinary(BinaryPath):
"""The `bash` binary."""
DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin"))
@dataclass(frozen=True)
class BashBinaryRequest:
search_path: SearchPath = BashBinary.DEFAULT_SEARCH_PATH
@rule(desc="Finding the `bash` binary", level=LogLevel.DEBUG)
async def find_bash(bash_request: BashBinaryRequest) -> BashBinary:
request = BinaryPathRequest(
binary_name="bash",
search_path=bash_request.search_path,
test=BinaryPathTest(args=["--version"]),
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path
if not first_path:
raise BinaryNotFoundError.from_request(request)
return BashBinary(first_path.path, first_path.fingerprint)
@rule
async def get_bash() -> BashBinary:
# Expose bash to external consumers.
return await Get(BashBinary, BashBinaryRequest())
@rule
async def find_binary(request: BinaryPathRequest) -> BinaryPaths:
# If we are not already locating bash, recurse to locate bash to use it as an absolute path in
# our shebang. This avoids mixing locations that we would search for bash into the search paths
# of the request we are servicing.
    # TODO(#10769): Replace this script with a statically linked native binary so we don't
    # depend on /bin/bash being available on the Process host.
if request.binary_name == "bash":
shebang = "#!/usr/bin/env bash"
else:
bash = await Get(BashBinary, BashBinaryRequest())
shebang = f"#!{bash.path}"
# Some subtle notes with this script:
#
# - The backslash after the `"""` ensures that the shebang is at the start of the script file.
# Many OSs will not see the shebang if there is intervening whitespace.
# - We run the script with `ProcessResult` instead of `FallibleProcessResult` so that we
# can catch bugs in the script itself, given an earlier silent failure.
# - We do not use `set -e` like normal because it causes the line
# `command which -a <bin> || true` to fail the script when using Bash 3, which macOS
# uses by default.
# - We set `ProcessCacheScope.PER_RESTART_SUCCESSFUL` to force re-run since any binary found
# on the host system today could be gone tomorrow. Ideally we'd only do this for local
# processes since all known remoting configurations include a static container image as
# part of their cache key which automatically avoids this problem. See #10769 for a
# solution that is less of a tradeoff.
script_path = "./find_binary.sh"
script_content = dedent(
f"""\
{shebang}
set -uox pipefail
if command -v which > /dev/null; then
command which -a $1 || true
else
command -v $1 || true
fi
"""
)
script_digest = await Get(
Digest,
CreateDigest([FileContent(script_path, script_content.encode(), is_executable=True)]),
)
search_path = create_path_env_var(request.search_path)
result = await Get(
ProcessResult,
Process(
description=f"Searching for `{request.binary_name}` on PATH={search_path}",
level=LogLevel.DEBUG,
input_digest=script_digest,
argv=[script_path, request.binary_name],
env={"PATH": search_path},
cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
),
)
binary_paths = BinaryPaths(binary_name=request.binary_name)
found_paths = result.stdout.decode().splitlines()
if not request.test:
return dataclasses.replace(binary_paths, paths=[BinaryPath(path) for path in found_paths])
results = await MultiGet(
Get(
FallibleProcessResult,
Process(
description=f"Test binary {path}.",
level=LogLevel.DEBUG,
argv=[path, *request.test.args],
cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
),
)
for path in found_paths
)
return dataclasses.replace(
binary_paths,
paths=[
(
BinaryPath.fingerprinted(path, result.stdout)
if request.test.fingerprint_stdout
else BinaryPath(path, result.stdout.decode())
)
for path, result in zip(found_paths, results)
if result.exit_code == 0
],
)
def rules():
return collect_rules()
|
|
#-------------------------------------------------------------------------------
# Name: moveMotors
# Purpose:     move stepper motors with the position and velocity of the blue joints
#
# Author: Luis Felipe Leiva H.
#
# Created: 09-10-2017
# Copyright: (c) felipe 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Imports used
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
from math import pi
import time
import atexit
import threading
import decimal as d
#import constantsMotor as cMot
#try:
# from .constantsMotor import stepDist, numSteps, R
#except SystemError as e:
# from constantsMotor import stepDist, numSteps, R
# Imports Used
try:
from Utilities.instruction import Instruction, InterpretedInstruction
except SystemError as e:
from instruction import Instruction, InterpretedInstruction
try:
from Utilities.vector import Vector3, interpolatePoints
except SystemError as e:
from vector import Vector3, interpolatePoints
""" constantsMotor module """
# calibration constant
cte = d.Decimal(10)
# gear motor radius in millimeters
R = d.Decimal(6)
# motor step angle in degrees in SINGLE mode
stepDeg = d.Decimal(1.8)
# distance traveled per motor step, in millimeters, in SINGLE mode
stepDist = d.Decimal(stepDeg*d.Decimal(pi)/d.Decimal(180)*R)
# number of motor steps
numSteps = d.Decimal(16)
"""Code for control the stepper motors"""
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
tophat.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
tophat.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
tophat.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
tophat.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
bottomhat.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
bottomhat.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
bottomhat.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
bottomhat.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
# create the tolerance rate
def makeTolerance(step, numSteps):
if (step == Adafruit_MotorHAT.SINGLE or step == Adafruit_MotorHAT.DOUBLE):
tolerance = d.Decimal(0.9)*stepDist
elif (step == Adafruit_MotorHAT.INTERLEAVE):
tolerance = (d.Decimal(0.9)*stepDist)/d.Decimal(2)
elif (step == Adafruit_MotorHAT.MICROSTEP):
tolerance = (d.Decimal(0.9)*stepDist)/d.Decimal(8)
else:
tolerance = d.Decimal(0.9)*stepDist
return numSteps*tolerance
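# Worked example (values assumed for illustration): with stepDist ≈ 0.188 mm
# (1.8 deg * pi/180 * 6 mm) and numSteps = 16, SINGLE/DOUBLE mode gives a tolerance of
# 16 * 0.9 * 0.188 ≈ 2.71 mm, while MICROSTEP gives roughly 0.34 mm.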
# gives the angular velocity in REV/min when qi_vel is in mm/s
def angVel(qi_vel):
return d.Decimal(30)/d.Decimal(pi)*qi_vel/R
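# Worked example (values assumed for illustration): with R = 6 mm, a linear speed of
# qi_vel = 10 mm/s gives w = (30/pi) * 10 / 6 ≈ 15.9 rev/min.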
# update current position
# numMotor: index of the motor, can be 0, 1 or 2
# distStepMode: distance traveled per step for the current step mode
# direction: can be 1 or -1
def posUpdate(qPos, numMotor, numSteps, distStepMode, direction):
qPos[numMotor] += d.Decimal(direction)*(numSteps*distStepMode - d.Decimal((3/17)-(1/8)))
return qPos
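# Worked example (values assumed for illustration): with numSteps = 16, a MICROSTEP
# distance distStepMode = stepDist/8 ≈ 0.0236 mm and direction = +1, the tracked
# position advances by 16 * 0.0236 - ((3/17) - (1/8)) ≈ 0.326 mm.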
def stepper_worker(stepper, numsteps, direction, style):
#print("Steppin!")
stepper.step(int(numsteps), int(direction), style)
#print("Done")
def moveSteppers(qPos, qRec, qVelReq, stepperThreads, numSteps, TOLERANCE):
q = [qPos.x / cte, qPos.y / cte, qPos.z / cte]
q_req = [qRec.x / cte, qRec.y / cte, qRec.z / cte]
q_vel_req = [qVelReq.x / cte, qVelReq.y / cte, qVelReq.z / cte]
    TOLERANCE_v = d.Decimal(2)  # extra margin beyond TOLERANCE; outside it the motors move in faster (SINGLE) steps
case_v = [True, True, True]
for i in range(3):
case_v[i] = q_req[i] > q[i] + TOLERANCE + TOLERANCE_v or q_req[i] < q[i] - TOLERANCE - TOLERANCE_v
print('qRec=', qRec)
print('q_req =', q_req)
print('q_vel_req =', q_vel_req)
    # Here it was checking an i that is defined further down ----- it has to be changed to an array of bools
movBool = [True, True, True]
    # Check the three boolean flags to see whether any motor still has to move
while (movBool[0] or movBool[1] or movBool[2]):
for i in range(3):
#print('q_req[i]=', q_req[i])
#print('q[i]=', q[i])
#print('TOLERANCE=', TOLERANCE)
case1 = q_req[i] > q[i] + TOLERANCE
case2 = q_req[i] < q[i] - TOLERANCE
movBool[i] = case1 or case2
#print('movBool=', movBool)
            if movBool[i] and not stepperThreads[i].is_alive():
#print("Stepper %s" % i)
if (case2):
dir = Adafruit_MotorHAT.BACKWARD
direction = -1
else:
dir = Adafruit_MotorHAT.FORWARD
direction = +1
# define angular velocity in rev/min
w = angVel(q_vel_req[i])
# set velocity
steppers[i].setSpeed(abs(float(w)))
                if (case_v[i]):
                    # full SINGLE steps each cover one stepDist
                    stepstyle = Adafruit_MotorHAT.SINGLE
                    recorrido_neto = stepDist
                else:
                    # MICROSTEP divides each step into 8 micro-steps
                    stepstyle = Adafruit_MotorHAT.MICROSTEP
                    recorrido_neto = stepDist/d.Decimal(8)
print('Moving stepper i=', i)
# update current position
q = posUpdate(q, i, numSteps, recorrido_neto, direction)
# create thread with numStep steps
stepperThreads[i] = threading.Thread(target=stepper_worker, args=(steppers[i], numSteps, dir, stepstyle,))
stepperThreads[i].start()
time.sleep(0.1) # Small delay to stop from constantly polling threads (see: https://forums.adafruit.com/viewtopic.php?f=50&t=104354&p=562733#p562733)
# bottom hat is default address 0x60
bottomhat = Adafruit_MotorHAT(addr=0x60)
# top hat has A0 jumper closed, so its address 0x61
tophat = Adafruit_MotorHAT(addr=0x61)
# create empty threads (these will hold the stepper 1, 2 & 3 threads)
stepperThreads = [threading.Thread(), threading.Thread(), threading.Thread()]
# register cleanup so the motors are released automatically on exit
atexit.register(turnOffMotors)
# motor configurations
myStepper1 = bottomhat.getStepper(200, 1) # 200 steps/rev, motor port #1
myStepper2 = bottomhat.getStepper(200, 2) # 200 steps/rev, motor port #2
myStepper3 = tophat.getStepper(200, 1) # 200 steps/rev, motor port #1
# step type
stepstyle = Adafruit_MotorHAT.MICROSTEP
steppers = [myStepper1, myStepper2, myStepper3]
# movement precision
TOLERANCE = makeTolerance(stepstyle, numSteps)
def movMotor(qPos, inst):
# move motors
moveSteppers(qPos, inst.pos, inst.vel, stepperThreads, numSteps, TOLERANCE)
if __name__ == '__main__':
# global variables
q1 = q2 = q3 = 0 # define motors at home position for test purposes
    # moveSteppers reads .x/.y/.z from its position and velocity arguments, so the test
    # vectors are built with Vector3 (assumed to take x, y, z positional arguments) and
    # Decimal components, since the positions are divided by the Decimal constant `cte`.
    qPos = Vector3(d.Decimal(q1), d.Decimal(q2), d.Decimal(q3))  # vector of position
    q1_req = q2_req = q3_req = 10  # for test purposes
    qReq = Vector3(d.Decimal(20), d.Decimal('19.5'), d.Decimal(22))  # vector of required position
    q1_vel_req = q2_vel_req = q3_vel_req = 500  # for test purposes
    qVelReq = Vector3(d.Decimal(q1_vel_req), d.Decimal(q2_vel_req), d.Decimal(q3_vel_req))  # vector of required velocity
# move motors
#movMotor()
TOLERANCE = makeTolerance(stepstyle, numSteps)
moveSteppers(qPos, qReq, qVelReq, stepperThreads, numSteps, TOLERANCE)
#(q_req, q_vel_req, stepperThreads, numSteps, TOLERANCE)
|
|
#import sqlalchemy
#from sqlalchemy.ext.declarative import declared_attr
from flask_simple_alchemy import RelationshipFactories, simple_table_factory
from testers import *
FakeTableFK = fact.foreign_key_factory('faketable')
FakeTableOneToOne = fact.one_to_one_factory('FakeTable', FakeTableFK)
FakeTableManyToOne = fact.many_to_one_factory('FakeTable', FakeTableFK)
class YetAnotherFakeTable(db.Model, FakeTableOneToOne):
__tablename__ = 'yetanotherfaketable'
id = db.Column(db.Integer, primary_key=True)
unique_name = db.Column(db.String, unique=True)
class AClassForTesting(db.Model, FakeTableManyToOne):
id = db.Column(db.Integer, primary_key=True)
__tablename__ = 'aclassfortesting'
class YetAnotherFakeTableAgain(db.Model, FakeTableManyToOne):
__tablename__ = 'yetanotherfaketableagain'
id = db.Column(db.Integer, primary_key=True)
unique_name = db.Column(db.String, unique=True)
class AnotherFakeTable(db.Model, FakeTableFK):
__tablename__ = 'anotherfaketable'
id = db.Column(db.Integer, primary_key=True)
unique_name = db.Column(db.String, unique=True)
def test_RelationshipFactories_init():
    #db = SQLAlchemy()
    try:
        factr = RelationshipFactories(db)
    except Exception:
        assert False, "RelationshipFactories initialization errored"
    assert isinstance(factr, RelationshipFactories)
    assert factr.db
def test_RelationshipFactories_init_not_passed_SQLAlchemy_db_object():
    class BlankClass(object):
        pass
    not_db = BlankClass()
    raised = False
    try:
        RelationshipFactories(not_db)
    except Exception:
        raised = True
    assert raised, "passing a non-SQLAlchemy object should raise"
def test_foreign_key_func():
#db = SQLAlchemy()
#fact = RelationshipFactories(db)
fk = fact.foreign_key('jason')
assert isinstance(fk, db.ForeignKey)
assert fk._colspec == 'jason'
def print_update():
print "update!!"
fk2 = fact.foreign_key('rahul', onupdate=print_update)
assert fk2._colspec == 'rahul'
assert fk2.onupdate == print_update
def test_foreign_key_factory():
#fact = RelationshipFactories(db)
    print(FakeTableFK.faketable_id.__dict__)
assert isinstance(FakeTableFK.faketable_id, db.Column)
testInt = db.Integer()
assert FakeTableFK.faketable_id.type.__dict__ == testInt.__dict__
FakeTableFK2 = fact.foreign_key_factory('faketable',
foreign_key='unique_name')
assert str(FakeTableFK2.faketable_unique_name.foreign_keys)\
== "set([ForeignKey('faketable.unique_name')])"
assert str(FakeTableFK2.faketable_unique_name.type)\
== 'INTEGER'
def test_relationship_func():
rel1to1 = fact.relationship(FakeTable, 'FakeTable',
uselist=False, lazy='select')
assert rel1to1
assert type(rel1to1) is type(db.relationship('FakeTable'))
def test_one_to_one_factory_default_foreign_key_as_id():
#db = SQLAlchemy()
FakeTableFK = fact.foreign_key_factory('faketable')
FakeTableOneToOne = fact.one_to_one_factory('FakeTable', FakeTableFK)
assert issubclass(FakeTableOneToOne, FakeTableFK)
assert FakeTableOneToOne.faketable_id is not None
assert isinstance(FakeTableOneToOne.faketable_id, db.Column)
def test_one_to_one_factory_foreign_key_as_second_arg():
OtherTableFK = fact.foreign_key_factory('othertable', 'uuid')
OtherTableOneToOne = fact.one_to_one_factory('OtherTable', OtherTableFK)
assert isinstance(OtherTableOneToOne.othertable_uuid, db.Column)
def test_database_build():
db.drop_all()
db.create_all()
def test_many_to_one_factory():
assert FakeTableManyToOne.faketable_id is not None
assert 'faketable' in FakeTableManyToOne.__dict__
assert AClassForTesting.__tablename__ == 'aclassfortesting'
assert 'InstrumentedAttribute' in str(type(AClassForTesting.faketable))
aclass = AClassForTesting()
db.session.add(aclass)
db.session.commit()
def test_ForeignKeyMixin():
FakeTableFK = fact.foreign_key_factory('faketable')
assert 'faketable_id' in AnotherFakeTable.__dict__
fk_obj = AnotherFakeTable.faketable_id
assert fk_obj
assert "InstrumentedAttribute" in str(type(fk_obj))
def test_OneToOneMixin():
db.drop_all()
db.create_all()
new_fake = FakeTable()
new_fake.non_unique_col = 'wwooo'
new_fake.unique_name = 'ft1'
db.session.add(new_fake)
db.session.commit()
ft1 = FakeTable.query.filter_by(unique_name='ft1').first()
yaft1 = YetAnotherFakeTable()
yaft1.unique_name = 'yaft1'
yaft1.faketable_id = ft1.id
db.session.add(yaft1)
db.session.commit()
ft1 = FakeTable.query.filter_by(unique_name='ft1').first()
yaft1 = YetAnotherFakeTable.query.filter_by(unique_name='yaft1').first()
assert YetAnotherFakeTable.faketable
assert YetAnotherFakeTable
assert yaft1.faketable == ft1
assert ft1.yetanotherfaketable == yaft1
yaft2 = YetAnotherFakeTable()
yaft2.unique_name = 'yaft2'
yaft2.faketable_id = ft1.id
db.session.add(yaft2)
db.session.commit()
assert ft1.yetanotherfaketable == yaft1
#assert ft1.yetanotherfaketable[1] == yaft2
db.drop_all()
def test_ManyToOneMixin():
FakeTableFK = fact.foreign_key_factory('faketable')
FakeTableManyToOne = fact.many_to_one_factory('FakeTable', FakeTableFK)
db.drop_all()
db.create_all()
assert YetAnotherFakeTableAgain
assert YetAnotherFakeTableAgain.faketable
new_fake = FakeTable()
new_fake.non_unique_col = 'wwooo'
new_fake.unique_name = 'ft1'
db.session.add(new_fake)
db.session.commit()
new_fake_saved = FakeTable.query.filter_by(unique_name='ft1').first()
assert new_fake_saved
newb1 = YetAnotherFakeTableAgain()
newb1.unique_name = 'yafta1'
newb1.faketable_id = new_fake_saved.id
db.session.add(newb1)
db.session.commit()
newb2 = YetAnotherFakeTableAgain()
newb2.unique_name = 'yafta2'
newb2.faketable_id = new_fake_saved.id
db.session.add(newb2)
db.session.commit()
count = YetAnotherFakeTableAgain.query.count()
assert count == 2
all_yafta = YetAnotherFakeTableAgain.query.all()
assert len(all_yafta) == 2
assert all_yafta[0].unique_name == 'yafta1'
assert all_yafta[0].faketable_id == 1
assert all_yafta[1].unique_name == 'yafta2'
assert all_yafta[1].faketable_id == 1
reloaded_fake_saved = FakeTable.query.filter_by(unique_name='ft1').first()
yafta1 = reloaded_fake_saved.yetanotherfaketableagain[0]
yafta2 = reloaded_fake_saved.yetanotherfaketableagain[1]
assert yafta1.unique_name == 'yafta1'
assert yafta2.unique_name == 'yafta2'
#assert one
#assert two
def test_simple_table_factory():
pass
|
|
from numba.core import utils, ir, analysis, transforms, ir_utils
class YieldPoint(object):
def __init__(self, block, inst):
assert isinstance(block, ir.Block)
assert isinstance(inst, ir.Yield)
self.block = block
self.inst = inst
self.live_vars = None
self.weak_live_vars = None
class GeneratorInfo(object):
def __init__(self):
# { index: YieldPoint }
self.yield_points = {}
# Ordered list of variable names
self.state_vars = []
def get_yield_points(self):
"""
Return an iterable of YieldPoint instances.
"""
return self.yield_points.values()
class VariableLifetime(object):
"""
For lazily building information of variable lifetime
"""
def __init__(self, blocks):
self._blocks = blocks
@utils.cached_property
def cfg(self):
return analysis.compute_cfg_from_blocks(self._blocks)
@utils.cached_property
def usedefs(self):
return analysis.compute_use_defs(self._blocks)
@utils.cached_property
def livemap(self):
return analysis.compute_live_map(self.cfg, self._blocks,
self.usedefs.usemap,
self.usedefs.defmap)
@utils.cached_property
def deadmaps(self):
return analysis.compute_dead_maps(self.cfg, self._blocks, self.livemap,
self.usedefs.defmap)
# other packages that define new nodes add calls for inserting dels
# format: {type:function}
ir_extension_insert_dels = {}
class PostProcessor(object):
"""
A post-processor for Numba IR.
"""
def __init__(self, func_ir):
self.func_ir = func_ir
def run(self, emit_dels=False, extend_lifetimes=False):
"""
Run the following passes over Numba IR:
- canonicalize the CFG
- emit explicit `del` instructions for variables
- compute lifetime of variables
- compute generator info (if function is a generator function)
"""
self.func_ir.blocks = transforms.canonicalize_cfg(self.func_ir.blocks)
vlt = VariableLifetime(self.func_ir.blocks)
self.func_ir.variable_lifetime = vlt
bev = analysis.compute_live_variables(vlt.cfg, self.func_ir.blocks,
vlt.usedefs.defmap,
vlt.deadmaps.combined)
for offset, ir_block in self.func_ir.blocks.items():
self.func_ir.block_entry_vars[ir_block] = bev[offset]
if self.func_ir.is_generator:
self.func_ir.generator_info = GeneratorInfo()
self._compute_generator_info()
else:
self.func_ir.generator_info = None
# Emit del nodes, do this last as the generator info parsing generates
# and then strips dels as part of its analysis.
if emit_dels:
self._insert_var_dels(extend_lifetimes=extend_lifetimes)
def _populate_generator_info(self):
"""
Fill `index` for the Yield instruction and create YieldPoints.
"""
dct = self.func_ir.generator_info.yield_points
assert not dct, 'rerunning _populate_generator_info'
for block in self.func_ir.blocks.values():
for inst in block.body:
if isinstance(inst, ir.Assign):
yieldinst = inst.value
if isinstance(yieldinst, ir.Yield):
index = len(dct) + 1
yieldinst.index = index
yp = YieldPoint(block, yieldinst)
dct[yieldinst.index] = yp
def _compute_generator_info(self):
"""
Compute the generator's state variables as the union of live variables
at all yield points.
"""
# generate del info, it's used in analysis here, strip it out at the end
self._insert_var_dels()
self._populate_generator_info()
gi = self.func_ir.generator_info
for yp in gi.get_yield_points():
live_vars = set(self.func_ir.get_block_entry_vars(yp.block))
weak_live_vars = set()
stmts = iter(yp.block.body)
for stmt in stmts:
if isinstance(stmt, ir.Assign):
if stmt.value is yp.inst:
break
live_vars.add(stmt.target.name)
elif isinstance(stmt, ir.Del):
live_vars.remove(stmt.value)
else:
assert 0, "couldn't find yield point"
# Try to optimize out any live vars that are deleted immediately
# after the yield point.
for stmt in stmts:
if isinstance(stmt, ir.Del):
name = stmt.value
if name in live_vars:
live_vars.remove(name)
weak_live_vars.add(name)
else:
break
yp.live_vars = live_vars
yp.weak_live_vars = weak_live_vars
st = set()
for yp in gi.get_yield_points():
st |= yp.live_vars
st |= yp.weak_live_vars
gi.state_vars = sorted(st)
self.remove_dels()
def _insert_var_dels(self, extend_lifetimes=False):
"""
Insert del statements for each variable.
Returns a 2-tuple of (variable definition map, variable deletion map)
which indicates variables defined and deleted in each block.
The algorithm avoids relying on explicit knowledge on loops and
distinguish between variables that are defined locally vs variables that
come from incoming blocks.
We start with simple usage (variable reference) and definition (variable
creation) maps on each block. Propagate the liveness info to predecessor
blocks until it stabilize, at which point we know which variables must
exist before entering each block. Then, we compute the end of variable
lives and insert del statements accordingly. Variables are deleted after
the last use. Variable referenced by terminators (e.g. conditional
branch and return) are deleted by the successors or the caller.
"""
vlt = self.func_ir.variable_lifetime
self._patch_var_dels(vlt.deadmaps.internal, vlt.deadmaps.escaping,
extend_lifetimes=extend_lifetimes)
def _patch_var_dels(self, internal_dead_map, escaping_dead_map,
extend_lifetimes=False):
"""
Insert delete in each block
"""
for offset, ir_block in self.func_ir.blocks.items():
# for each internal var, insert delete after the last use
internal_dead_set = internal_dead_map[offset].copy()
delete_pts = []
# for each statement in reverse order
for stmt in reversed(ir_block.body[:-1]):
# internal vars that are used here
live_set = set(v.name for v in stmt.list_vars())
dead_set = live_set & internal_dead_set
for T, def_func in ir_extension_insert_dels.items():
if isinstance(stmt, T):
done_dels = def_func(stmt, dead_set)
dead_set -= done_dels
internal_dead_set -= done_dels
# used here but not afterwards
delete_pts.append((stmt, dead_set))
internal_dead_set -= dead_set
# rewrite body and insert dels
body = []
lastloc = ir_block.loc
del_store = []
for stmt, delete_set in reversed(delete_pts):
# If using extended lifetimes then the Dels are all put at the
# block end just ahead of the terminator, so associate their
# location with the terminator.
if extend_lifetimes:
lastloc = ir_block.body[-1].loc
else:
lastloc = stmt.loc
# Ignore dels (assuming no user inserted deletes)
if not isinstance(stmt, ir.Del):
body.append(stmt)
# note: the reverse sort is not necessary for correctness
# it is just to minimize changes to test for now
for var_name in sorted(delete_set, reverse=True):
delnode = ir.Del(var_name, loc=lastloc)
if extend_lifetimes:
del_store.append(delnode)
else:
body.append(delnode)
if extend_lifetimes:
body.extend(del_store)
body.append(ir_block.body[-1]) # terminator
ir_block.body = body
# vars to delete at the start
escape_dead_set = escaping_dead_map[offset]
for var_name in sorted(escape_dead_set):
ir_block.prepend(ir.Del(var_name, loc=ir_block.body[0].loc))
def remove_dels(self):
"""
Strips the IR of Del nodes
"""
ir_utils.remove_dels(self.func_ir.blocks)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor class for computation declaration."""
# pylint: disable=invalid-name
import tvm._ffi
from tvm.runtime import Object, ObjectGeneric, convert_to_object
from tvm.tir import expr as _expr, DataProducer
from . import _ffi_api
class TensorSlice(ObjectGeneric, _expr.ExprOp):
"""Auxiliary data structure for enable slicing syntax from tensor."""
def __init__(self, tensor, indices):
if not isinstance(indices, tuple):
indices = (indices,)
self.tensor = tensor
self.indices = indices
def __getitem__(self, indices):
if not isinstance(indices, tuple):
indices = (indices,)
return TensorSlice(self.tensor, self.indices + indices)
def asobject(self):
"""Convert slice to object."""
return self.tensor(*self.indices)
@property
def dtype(self):
"""Data content of the tensor."""
return self.tensor.dtype
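# Illustrative sketch (assumed placeholder shape/name, for demonstration): indexing a
# tensor with square brackets produces a TensorSlice; converting it yields the same
# ProducerLoad expression as calling the tensor directly.
#
#     A = tvm.te.placeholder((10, 10), name="A")
#     s = A[0, 1]           # TensorSlice over A with indices (0, 1)
#     expr = s.asobject()   # equivalent to A(0, 1), a ProducerLoad expression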
@tvm._ffi.register_object
class TensorIntrinCall(Object):
"""Intermediate structure for calling a tensor intrinsic."""
@tvm._ffi.register_object
class Tensor(DataProducer, _expr.ExprOp):
"""Tensor object, to construct, see function.Tensor"""
def __call__(self, *indices):
ndim = self.ndim
if len(indices) != ndim:
raise ValueError("Need to provide %d index in tensor slice" % ndim)
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def __getitem__(self, indices):
return TensorSlice(self, indices)
def __hash__(self):
return _ffi_api.TensorHash(self)
def __eq__(self, other):
if not isinstance(other, Tensor):
if isinstance(other, _expr.ExprOp):
return _expr.EqualOp(self, other)
return False
if self.ndim == 0 and other.ndim == 0:
            raise ValueError(
                "Equal == comparison among rank-0 tensors is ambiguous, "
                "use Tensor.equal for content expression equivalence, "
                "use Tensor.same_as for exact reference comparison"
            )
return _ffi_api.TensorEqual(self, other)
@property
def ndim(self):
"""Dimension of the tensor."""
return len(self.shape)
@property
def axis(self):
"""Axis of the tensor."""
return self.__getattr__("axis")
@property
def op(self):
"""The corressponding :py:class:`Operation`."""
return self.__getattr__("op")
@property
def value_index(self):
"""The output value index the tensor corresponds to."""
return self.__getattr__("value_index")
@property
def shape(self):
"""The output shape of the tensor."""
return self.__getattr__("shape")
@property
def name(self):
op = self.op
if op.num_outputs == 1:
return op.name
return "%s.v%d" % (op.name, self.value_index)
class Operation(Object):
"""Represent an operation that generates a tensor"""
def output(self, index):
"""Get the index-th output of the operation
Parameters
----------
index : int
            The output index.
Returns
-------
out : Tensor
The i-th output.
"""
return _ffi_api.OpGetOutput(self, index)
@property
def num_outputs(self):
"""Number of outputs from this op."""
return _ffi_api.OpNumOutputs(self)
@property
def input_tensors(self):
"""List of input tensors to this op."""
return _ffi_api.OpInputTensors(self)
@tvm._ffi.register_object
class PlaceholderOp(Operation):
"""Placeholder operation."""
@tvm._ffi.register_object
class BaseComputeOp(Operation):
"""Compute operation."""
@property
def axis(self):
"""Represent the IterVar axis, defined when it is a ComputeOp"""
return self.__getattr__("axis")
@property
def reduce_axis(self):
"""Represent axis of reductions, only defined when it is a ComputeOp"""
return self.__getattr__("reduce_axis")
@tvm._ffi.register_object
class ComputeOp(BaseComputeOp):
"""Scalar operation."""
@tvm._ffi.register_object
class TensorComputeOp(BaseComputeOp):
"""Tensor operation."""
@tvm._ffi.register_object
class ScanOp(Operation):
"""Scan operation."""
@property
def scan_axis(self):
"""Represent the scan axis, only defined when it is a ScanOp"""
return self.__getattr__("scan_axis")
@tvm._ffi.register_object
class ExternOp(Operation):
"""External operation."""
@tvm._ffi.register_object
class HybridOp(Operation):
"""Hybrid operation."""
@property
def axis(self):
"""Represent the IterVar axis, also defined when it is a HybridOp"""
return self.__getattr__("axis")
|
|
#!/usr/bin/env python3
from itertools import compress
import os
import sys
import time
from xfcs.FCSFile.FCSFile import FCSFile, channel_name_keywords
from xfcs.utils import metadata_csv, metadata_time, metadata_plot
from xfcs.utils.locator import locate_fcs_files
from xfcs.utils.metadata_stats import add_param_mean
from xfcs.version import VERSION
# ------------------------------------------------------------------------------
FORCED_SRC_KEYS = ('CSV_CREATED', 'SRC_DIR', 'SRC_FILE')
# ------------------------------ KEYWORD PREFS ---------------------------------
def read_kw_prefs(kw_filter_file):
"""Read user selected keywords from text file and insert forced src keys
if not included by user.
Arg:
kw_filter_file: filepath to user kw text file.
Returns:
user_meta_keys: iterable of fcs Parameter keys.
"""
user_meta_keys = None
with open(kw_filter_file, 'r') as kw_file:
user_meta_keys = [line.strip().upper() for line in kw_file if line.strip() != '']
for key in reversed(FORCED_SRC_KEYS):
if key not in user_meta_keys:
user_meta_keys.insert(0, key)
return user_meta_keys
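# Illustrative sketch of a keyword preference file consumed by read_kw_prefs; the FCS
# keywords listed are assumptions for demonstration. One keyword per line, case is
# normalized to upper, and any missing forced source keys are inserted at the top.
#
#     $DATE
#     $TOT
#     $CYT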
def write_kw_prefs(meta_keys):
"""Write all located fcs Parameter keys to text file
Arg:
meta_keys: iterable of fcs metadata Parameter keys in order relative to
location in fcs file text section.
Returns:
kw_prefs_filename: name of generated text file
"""
kw_prefs_filename = 'FCS_USER_KW.txt'
with open(kw_prefs_filename, 'w') as kw_file:
for keyword in meta_keys:
kw_file.write('{}\n'.format(keyword))
return kw_prefs_filename
# ------------------------------------------------------------------------------
def load_metadata(paths, quiet=False):
"""
--> makes hashtable -> filepath : fcs file class instance
meta_keys == all_keys w any new keys extended
replaced -> meta_keys = ['FILEPATH'] with 'SRC_FILE'
Arg:
paths: iterable of fcs filepaths
Returns:
fcs_objs:
meta_keys:
"""
fcs_objs = []
meta_keys = []
meta_keys.extend(FORCED_SRC_KEYS)
for filepath in paths:
fcs = FCSFile(quiet)
fcs.load(filepath)
fcs.set_param('CSV_CREATED', time.strftime('%m/%d/%y %H:%M:%S'))
fcs.set_param('SRC_DIR', fcs.parentdir)
fcs.set_param('SRC_FILE', fcs.name)
meta_keys.extend((mk for mk in fcs.param_keys if mk not in meta_keys))
fcs_objs.append(fcs)
fcs.close()
return fcs_objs, meta_keys
# ------------------------------------------------------------------------------
def merge_metadata(fcs_objs, meta_keys, tidy, fn_out=''):
"""All fcs metadata written to one csv file.
Args:
fcs_objs: iterable of loaded FCSFile instances.
meta_keys: iterable of fcs metadata Parameter keys to use.
tidy: bool - enables tidy data format.
fn_out: optional filepath/name for csv file.
Returns:
csv_fn: filename of generated csv.
"""
if fn_out:
csv_fn = fn_out
else:
desc = '-t' if tidy else '-w'
curdir_name = os.path.basename(os.getcwd())
csv_fn = '{}_FCS_metadata{}.csv'.format(curdir_name, desc)
metadata_csv.write_file(fcs_objs, meta_keys, csv_fn, tidy)
return csv_fn
def fcs_to_csv_path(fcs_name, fcs_dir='', tidy=False):
"""Convert fcs filename to csv_metadata filename."""
desc = '-t' if tidy else '-w'
filename = fcs_name.split('.')[0]
csv_fn = '{}_metadata{}.csv'.format(filename, desc)
if fcs_dir:
csv_fn = os.path.join(fcs_dir, csv_fn)
return csv_fn
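# Illustrative sketch (hypothetical filenames, for demonstration):
#
#     fcs_to_csv_path('sample01.fcs', tidy=True)      # -> 'sample01_metadata-t.csv'
#     fcs_to_csv_path('sample01.fcs', fcs_dir='data')  # -> 'data/sample01_metadata-w.csv' on POSIX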
def write_obj_metadata(fcs):
meta_keys = list(FORCED_SRC_KEYS)
meta_keys.extend(fcs.param_keys)
csv_fn = fcs_to_csv_path(fcs.name, fcs.parentdir)
fcs.set_param('CSV_CREATED', time.strftime('%m/%d/%y %H:%M:%S'))
fcs.set_param('SRC_DIR', fcs.parentdir)
fcs.set_param('SRC_FILE', fcs.name)
metadata_csv.write_file((fcs,), meta_keys, csv_fn, tidy=False)
def batch_separate_metadata(fcs_objs, meta_keys, tidy):
"""Batch process all fcs to their own, separate csv file.
Args:
fcs_objs: iterable of loaded FCSFile instances.
meta_keys: iterable of fcs metadata Parameter keys to use.
tidy: bool - enables tidy data format.
Returns:
csv_paths: iterable of filepaths to generated csv files.
"""
csv_paths = []
for fcs in fcs_objs:
sep_keys = tuple(key for key in meta_keys if fcs.has_param(key))
csv_fn = fcs_to_csv_path(fcs.name, fcs.parentdir, tidy=tidy)
metadata_csv.write_file((fcs,), sep_keys, csv_fn, tidy)
csv_paths.append(csv_fn)
return csv_paths
# ------------------------------------------------------------------------------
def get_fcs_paths(in_paths, recursive, limit=0):
"""Locate and sort / limit fcs filepaths if not using --input arg.
    Dir search, sorting and limit are disabled if in_paths is not empty.
In dir search, files are sorted by filename. If limit is enabled, an
attempt to sort via os.path.getctime is made. If this fails, further
sorting is attempted within main() based on sort_confirmed value.
Args:
in_paths: iterable of fcs paths from args.input, disables dir search.
recursive: bool - enables recursive dir search.
        limit: int - limits the number of located files when using dir search.
Returns:
fcs_paths: iterable of fcs filepaths.
sort_confirmed: bool - confirms attempt to sort paths by ctime.
"""
if in_paths:
fcs_paths = [infile.name for infile in in_paths if infile.name.lower().endswith('.fcs')]
else:
fcs_paths = locate_fcs_files(recursive)
sort_confirmed = True
if limit and not in_paths:
by_ctime = metadata_time.sort_by_ctime(fcs_paths)
if by_ctime:
fcs_paths = by_ctime[-limit:]
else:
sort_confirmed = False
return fcs_paths, sort_confirmed
# ------------------------------------------------------------------------------
def batch_load_fcs_from_csv(merge_keys, merge_data):
"""Init FCSFile instances using extracted metadata from csv file."""
merge_objs = []
for param_vals in merge_data:
fcs = FCSFile()
fcs.load_from_csv(merge_keys, param_vals)
merge_objs.append(fcs)
return merge_objs
def append_metadata(fcs_objs, meta_keys, master_csv, fn_out):
"""Append new fcs file(s) metadata to existing fcs metadata csv file.
USER_KW_PREFS is bypassed and keyword set from master csv acts as
keyword filter.
Args:
        fcs_objs: iterable of loaded FCSFile instances.
meta_keys: all text param keywords located in new fcs files.
master_csv: filepath existing metadata csv file.
fn_out: output csv filepath - user can select new file for merging
            instead of appending.
"""
merge_keys, merge_data, is_tidy = metadata_csv.read_file(master_csv, meta_keys)
if not all((merge_keys, merge_data)):
print('>>> No metadata keys match / data located')
return
csv_fcs_objs = batch_load_fcs_from_csv(merge_keys, merge_data)
# check duplicate fcs metadata entries
comparison_keys = [key for key in merge_keys if key in meta_keys]
csv_fcs_hashes = set(fcs.meta_hash(comparison_keys) for fcs in csv_fcs_objs)
incoming_hashes = [fcs.meta_hash(comparison_keys) for fcs in fcs_objs]
hash_filter = [md_hash not in csv_fcs_hashes for md_hash in incoming_hashes]
all_fcs_objs = []
all_fcs_objs.extend(csv_fcs_objs)
if not all(hash_filter):
unique_fcs = tuple(compress(fcs_objs, hash_filter))
if not unique_fcs:
print('>>> No unique fcs files to append to master csv')
return
else:
all_fcs_objs.extend(unique_fcs)
else:
all_fcs_objs.extend(fcs_objs)
if '$DATE' in merge_keys:
all_fcs_objs = metadata_time.sort_by_time_params(all_fcs_objs)
else:
all_fcs_objs.sort(key=lambda fcs: fcs.name)
merge_keys = add_param_mean(all_fcs_objs, merge_keys)
csv_out_path = merge_metadata(all_fcs_objs, merge_keys, is_tidy, fn_out)
print('>>> fcs metadata appended to: {}'.format(csv_out_path))
# ------------------------------------------------------------------------------
def main(args):
"""Main control for CLI metadata extraction.
    fcs_objs: iterable of loaded FCSFile instances
    meta_keys: all located keys in order, plus any new (calculated) keys at the end
"""
paths, sort_confirmed = get_fcs_paths(args.input, args.recursive, args.limit)
print('>>> fcs files located:', len(paths))
if not paths:
sys.exit(0)
fcs_objs, meta_keys = load_metadata(paths, args.quiet)
# TODO: add arg to force param time sort?
if not sort_confirmed and not args.merge:
sorted_fcs = metadata_time.sort_by_time_params(fcs_objs)
if not sorted_fcs:
print('>>> Unable to access any time related metadata for fcs files.')
print('>>> Disable --limit option in command and manually list --input files.')
sys.exit(0)
fcs_objs = sorted_fcs[-args.limit:]
if args.get_kw:
kw_prefs_filename = write_kw_prefs(meta_keys)
print('>>> FCS Keyword file generated:', kw_prefs_filename)
elif args.merge:
master_csv = args.merge.name
fn_out = master_csv if not args.output else args.output.name
append_metadata(fcs_objs, meta_keys, master_csv, fn_out)
else:
check_user_mean_keys = False
if args.kw_filter:
meta_keys = read_kw_prefs(args.kw_filter.name)
check_user_mean_keys = any('_MEAN' in key for key in meta_keys)
elif args.spx_names:
name_keys = list(channel_name_keywords(meta_keys))
meta_keys = list(FORCED_SRC_KEYS)
meta_keys.extend(name_keys)
if args.sepfiles:
csv_paths = batch_separate_metadata(fcs_objs, meta_keys, args.tidy)
print('\n>>> csv files written: {}\n'.format(len(csv_paths)))
else:
if check_user_mean_keys:
meta_keys = add_param_mean(fcs_objs, meta_keys)
fn_out = '' if not args.output else args.output.name
csv_out_path = merge_metadata(fcs_objs, meta_keys, args.tidy, fn_out)
print('\n>>> csv file written to: {}\n'.format(csv_out_path))
if args.dashboard:
metadata_plot.dashboard(fcs_objs, meta_keys)
# ------------------------------------------------------------------------------
|
|
# -*- coding: utf-8 -*-
from typing import Iterable, Iterator, List, Sequence, Tuple, cast, Set
from multiset import Multiset
from ..expressions.expressions import (
Expression, Pattern, Operation, Symbol, SymbolWildcard, Wildcard, AssociativeOperation, CommutativeOperation, OneIdentityOperation
)
from ..expressions.constraints import Constraint
from ..expressions.substitution import Substitution
from ..expressions.functions import (
is_constant, preorder_iter_with_position, match_head, create_operation_expression, op_iter, op_len
)
from ..utils import (
VariableWithCount, commutative_sequence_variable_partition_iter, fixed_integer_vector_iter, weak_composition_iter,
generator_chain, optional_iter
)
from ._common import CommutativePatternsParts, check_one_identity
__all__ = ['match', 'match_anywhere']
def match(subject: Expression, pattern: Pattern) -> Iterator[Substitution]:
r"""Tries to match the given *pattern* to the given *subject*.
Yields each match in form of a substitution.
Parameters:
subject:
            A subject to match.
pattern:
The pattern to match.
Yields:
All possible match substitutions.
Raises:
ValueError:
If the subject is not constant.
"""
if not is_constant(subject):
raise ValueError("The subject for matching must be constant.")
global_constraints = [c for c in pattern.constraints if not c.variables]
local_constraints = set(c for c in pattern.constraints if c.variables)
for subst in _match([subject], pattern.expression, Substitution(), local_constraints):
for constraint in global_constraints:
if not constraint(subst):
break
else:
yield subst
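# Illustrative sketch of calling match() through matchpy's public expression classes;
# the names f, a, b and x below are assumptions chosen for demonstration.
#
#     from matchpy import Operation, Symbol, Wildcard, Pattern, Arity
#     f = Operation.new('f', Arity.binary)
#     a, b = Symbol('a'), Symbol('b')
#     x_ = Wildcard.dot('x')
#     list(match(f(a, b), Pattern(f(x_, b))))   # one substitution mapping 'x' to Symbol('a')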
def match_anywhere(subject: Expression, pattern: Pattern) -> Iterator[Tuple[Substitution, Tuple[int, ...]]]:
"""Tries to match the given *pattern* to the any subexpression of the given *subject*.
Yields each match in form of a substitution and a position tuple.
The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself,
:code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child of
the first child etc.
Parameters:
subject:
            A subject to match.
pattern:
The pattern to match.
Yields:
All possible substitution and position pairs.
Raises:
ValueError:
If the subject is not constant.
"""
if not is_constant(subject):
raise ValueError("The subject for matching must be constant.")
for child, pos in preorder_iter_with_position(subject):
if match_head(child, pattern):
for subst in match(child, pattern):
yield subst, pos
def _match(subjects: List[Expression], pattern: Expression, subst: Substitution,
constraints: Set[Constraint]) -> Iterator[Substitution]:
match_iter = None
expr = subjects[0] if subjects else None
if isinstance(pattern, Wildcard):
# All size checks are already handled elsewhere
# When called directly from match, len(subjects) = 1
# The operation matching also already only assigns valid number of subjects to a wildcard
# So all we need to check here is the symbol type for SymbolWildcards
if isinstance(pattern, SymbolWildcard) and not isinstance(subjects[0], pattern.symbol_type):
return
match_iter = iter([subst])
if pattern.optional is not None and not subjects:
expr = pattern.optional
elif not pattern.fixed_size:
expr = tuple(subjects)
elif isinstance(pattern, Symbol):
if len(subjects) == 1 and isinstance(subjects[0], type(pattern)) and subjects[0].name == pattern.name:
match_iter = iter([subst])
elif isinstance(pattern, Operation):
if isinstance(pattern, OneIdentityOperation):
yield from _match_one_identity(subjects, pattern, subst, constraints)
if len(subjects) != 1 or not isinstance(subjects[0], pattern.__class__):
return
op_expr = cast(Operation, subjects[0])
# if not op_expr.symbols >= pattern.symbols:
# return
match_iter = _match_operation(op_expr, pattern, subst, constraints)
else:
if len(subjects) == 1 and subjects[0] == pattern:
match_iter = iter([subst])
if match_iter is not None:
if getattr(pattern, 'variable_name', False):
for new_subst in match_iter:
try:
if expr is None and getattr(pattern, 'optional', None) is not None:
expr = pattern.optional
new_subst = new_subst.union_with_variable(pattern.variable_name, expr)
except ValueError:
pass
else:
yield from _check_constraints(new_subst, constraints)
else:
yield from match_iter
def _check_constraints(substitution, constraints):
restore_constraints = set()
try:
for constraint in list(constraints):
for var in constraint.variables:
if var not in substitution:
break
else:
if not constraint(substitution):
break
restore_constraints.add(constraint)
constraints.remove(constraint)
else:
yield substitution
finally:
for constraint in restore_constraints:
constraints.add(constraint)
def _match_factory(subjects, operand, constraints):
def factory(subst):
yield from _match(subjects, operand, subst, constraints)
return factory
def _count_seq_vars(subjects, operation):
remaining = op_len(subjects)
sequence_var_count = 0
optional_count = 0
for operand in op_iter(operation):
if isinstance(operand, Wildcard):
if not operand.fixed_size or isinstance(operation, AssociativeOperation):
sequence_var_count += 1
if operand.optional is None:
remaining -= operand.min_count
elif operand.optional is not None:
optional_count += 1
else:
remaining -= operand.min_count
else:
remaining -= 1
if remaining < 0:
raise ValueError
return remaining, sequence_var_count, optional_count
def _build_full_partition(
optional_parts, sequence_var_partition: Sequence[int], subjects: Sequence[Expression], operation: Operation
) -> List[Sequence[Expression]]:
"""Distribute subject operands among pattern operands.
    Given a partitioning of the variable part of the operands (i.e. a list of how many extra operands each
    sequence variable gets assigned), build the full list of operand groups, one group per pattern operand.
"""
i = 0
var_index = 0
opt_index = 0
result = []
for operand in op_iter(operation):
wrap_associative = False
if isinstance(operand, Wildcard):
count = operand.min_count if operand.optional is None else 0
if not operand.fixed_size or isinstance(operation, AssociativeOperation):
count += sequence_var_partition[var_index]
var_index += 1
wrap_associative = operand.fixed_size and operand.min_count
elif operand.optional is not None:
count = optional_parts[opt_index]
opt_index += 1
else:
count = 1
operand_expressions = list(op_iter(subjects))[i:i + count]
i += count
if wrap_associative and len(operand_expressions) > wrap_associative:
fixed = wrap_associative - 1
operand_expressions = tuple(operand_expressions[:fixed]) + (
create_operation_expression(operation, operand_expressions[fixed:]),
)
result.append(operand_expressions)
return result
def _non_commutative_match(subjects, operation, subst, constraints):
try:
remaining, sequence_var_count, optional_count = _count_seq_vars(subjects, operation)
except ValueError:
return
for new_remaining, optional in optional_iter(remaining, optional_count):
if new_remaining < 0:
continue
for part in weak_composition_iter(new_remaining, sequence_var_count):
partition = _build_full_partition(optional, part, subjects, operation)
factories = [_match_factory(e, o, constraints) for e, o in zip(partition, op_iter(operation))]
for new_subst in generator_chain(subst, *factories):
yield new_subst
def _match_one_identity(subjects, operation, subst, constraints):
non_optional, added_subst = check_one_identity(operation)
if non_optional is not None:
try:
new_subst = subst.union(added_subst)
except ValueError:
return
yield from _match(subjects, non_optional, new_subst, constraints)
def _match_operation(subjects, operation, subst, constraints):
if op_len(operation) == 0:
if op_len(subjects) == 0:
yield subst
return
if not isinstance(operation, CommutativeOperation):
yield from _non_commutative_match(subjects, operation, subst, constraints)
else:
parts = CommutativePatternsParts(type(operation), *op_iter(operation))
yield from _match_commutative_operation(subjects, parts, subst, constraints)
def _match_commutative_operation(
subject_operands: Iterable[Expression],
pattern: CommutativePatternsParts,
substitution: Substitution,
constraints
) -> Iterator[Substitution]:
subjects = Multiset(op_iter(subject_operands)) # type: Multiset
if not pattern.constant <= subjects:
return
subjects -= pattern.constant
rest_expr = pattern.rest + pattern.syntactic
needed_length = (
pattern.sequence_variable_min_length + pattern.fixed_variable_length + len(rest_expr) +
pattern.wildcard_min_length
)
if len(subjects) < needed_length:
return
fixed_vars = Multiset(pattern.fixed_variables) # type: Multiset[str]
for name, count in pattern.fixed_variables.items():
if name in substitution:
replacement = substitution[name]
if issubclass(pattern.operation, AssociativeOperation) and isinstance(replacement, pattern.operation):
needed_count = Multiset(op_iter(substitution[name])) # type: Multiset
else:
if isinstance(replacement, (tuple, list, Multiset)):
return
needed_count = Multiset({replacement: 1})
if count > 1:
needed_count *= count
if not needed_count <= subjects:
return
subjects -= needed_count
del fixed_vars[name]
factories = [_fixed_expr_factory(e, constraints) for e in rest_expr]
if not issubclass(pattern.operation, AssociativeOperation):
for name, count in fixed_vars.items():
min_count, symbol_type, default = pattern.fixed_variable_infos[name]
factory = _fixed_var_iter_factory(name, count, min_count, symbol_type, constraints, default)
factories.append(factory)
if pattern.wildcard_fixed is True:
factory = _fixed_var_iter_factory(None, 1, pattern.wildcard_min_length, None, constraints, None)
factories.append(factory)
else:
for name, count in fixed_vars.items():
min_count, symbol_type, default = pattern.fixed_variable_infos[name]
if symbol_type is not None:
factory = _fixed_var_iter_factory(name, count, min_count, symbol_type, constraints, default)
factories.append(factory)
for rem_expr, substitution in generator_chain((subjects, substitution), *factories):
sequence_vars = _variables_with_counts(pattern.sequence_variables, pattern.sequence_variable_infos)
if issubclass(pattern.operation, AssociativeOperation):
sequence_vars += _variables_with_counts(fixed_vars, pattern.fixed_variable_infos)
if pattern.wildcard_fixed is True:
sequence_vars += (VariableWithCount(None, 1, pattern.wildcard_min_length, None), )
if pattern.wildcard_fixed is False:
sequence_vars += (VariableWithCount(None, 1, pattern.wildcard_min_length, None), )
for sequence_subst in commutative_sequence_variable_partition_iter(Multiset(rem_expr), sequence_vars):
if issubclass(pattern.operation, AssociativeOperation):
for v in fixed_vars.distinct_elements():
if v not in sequence_subst:
continue
l = pattern.fixed_variable_infos[v].min_count
value = cast(Sequence, sequence_subst[v])
if isinstance(value, (list, tuple, Multiset)):
if len(value) > l:
normal = Multiset(list(value)[:l - 1])
wrapped = pattern.operation(*(value - normal))
normal.add(wrapped)
sequence_subst[v] = normal if l > 1 else next(iter(normal))
else:
assert len(value) == 1 and l == 1, "Fixed variables with length != 1 are not supported."
sequence_subst[v] = next(iter(value))
try:
result = substitution.union(sequence_subst)
except ValueError:
pass
else:
yield from _check_constraints(result, constraints)
def _variables_with_counts(variables, infos):
return tuple(
VariableWithCount(name, count, infos[name].min_count, infos[name].default)
for name, count in variables.items() if infos[name].type is None
)
def _fixed_expr_factory(expression, constraints):
def factory(data):
subjects, substitution = data
for expr in subjects.distinct_elements():
if match_head(expr, expression):
for subst in _match([expr], expression, substitution, constraints):
yield subjects - Multiset({expr: 1}), subst
return factory
def _fixed_var_iter_factory(variable_name, count, length, symbol_type, constraints, optional):
def factory(data):
subjects, substitution = data
if variable_name in substitution:
value = ([substitution[variable_name]]
if not isinstance(substitution[variable_name], (tuple, list, Multiset)) else substitution[variable_name])
if optional is not None and value == [optional]:
yield subjects, substitution
existing = Multiset(value) * count
if not existing <= subjects:
return
yield subjects - existing, substitution
else:
if optional is not None:
new_substitution = Substitution(substitution)
new_substitution[variable_name] = optional
yield subjects, new_substitution
if length == 1:
for expr, expr_count in subjects.items():
if expr_count >= count and (symbol_type is None or isinstance(expr, symbol_type)):
if variable_name is not None:
new_substitution = Substitution(substitution)
new_substitution[variable_name] = expr
for new_substitution in _check_constraints(new_substitution, constraints):
yield subjects - Multiset({expr: count}), new_substitution
else:
yield subjects - Multiset({expr: count}), substitution
else:
assert variable_name is None, "Fixed variables with length != 1 are not supported."
exprs_with_counts = list(subjects.items())
counts = tuple(c // count for _, c in exprs_with_counts)
for subset in fixed_integer_vector_iter(counts, length):
sub_counter = Multiset(dict((exprs_with_counts[i][0], c * count) for i, c in enumerate(subset)))
yield subjects - sub_counter, substitution
return factory
|
|
# -*- coding: utf-8 -*-
import unittest
from typing import Set, Tuple
from pybel import BELGraph
from pybel.constants import ANNOTATIONS
from pybel.dsl import BaseEntity, Protein
from pybel.struct.filters import (
and_edge_predicates,
concatenate_node_predicates,
count_passed_edge_filter,
count_passed_node_filter,
filter_edges,
get_nodes,
invert_edge_predicate,
)
from pybel.struct.filters.edge_predicate_builders import (
_annotation_dict_all_filter,
_annotation_dict_any_filter,
build_annotation_dict_all_filter,
build_annotation_dict_any_filter,
)
from pybel.struct.filters.edge_predicates import true_edge_predicate
from pybel.struct.filters.node_predicates import true_node_predicate
from pybel.struct.filters.typing import EdgeIterator
from pybel.testing.utils import n
def make_edge_iterator_set(it: EdgeIterator) -> Set[Tuple[BaseEntity, BaseEntity]]:
return {(u, v) for u, v, _ in it}
class TestNodeFilters(unittest.TestCase):
def setUp(self):
self.universe = BELGraph()
self.universe.add_edge(1, 2)
self.universe.add_edge(2, 3)
self.universe.add_edge(3, 7)
self.universe.add_edge(1, 4)
self.universe.add_edge(1, 5)
self.universe.add_edge(5, 6)
self.universe.add_edge(8, 2)
self.graph = BELGraph()
self.graph.add_edge(1, 2)
self.all_universe_nodes = {1, 2, 3, 4, 5, 6, 7, 8}
self.all_graph_nodes = {1, 2}
def test_no_node_filter_argument(self):
nodes = get_nodes(self.universe, [])
self.assertEqual(self.all_universe_nodes, nodes)
def test_keep_node_permissive(self):
nodes = get_nodes(self.universe, true_node_predicate)
self.assertEqual(self.all_universe_nodes, nodes)
def test_missing_node_filter(self):
nodes = get_nodes(self.universe, concatenate_node_predicates([]))
self.assertEqual(self.all_universe_nodes, nodes)
def test_concatenate_single_node_filter(self):
nodes = get_nodes(self.universe, [true_node_predicate])
self.assertEqual(self.all_universe_nodes, nodes)
def test_concatenate_multiple_node_filters(self):
def even(_, node) -> bool:
return node % 2 == 0
def big(_, node) -> bool:
return node > 3
nodes = get_nodes(self.universe, [even, big])
self.assertEqual({4, 6, 8}, nodes)
self.assertEqual(3, count_passed_node_filter(self.universe, [even, big]))
def test_no_edge_filter(self):
edges = make_edge_iterator_set(filter_edges(self.graph, []))
self.assertEqual({(1, 2)}, edges)
def test_keep_edge_permissive(self):
edges = make_edge_iterator_set(filter_edges(self.graph, true_edge_predicate))
self.assertEqual({(1, 2)}, edges)
def test_keep_edge_unpermissive(self):
keep_edge_restrictive = invert_edge_predicate(true_edge_predicate)
edges = make_edge_iterator_set(filter_edges(self.graph, keep_edge_restrictive))
self.assertEqual(set(), edges)
def test_missing_edge_filter(self):
edges = make_edge_iterator_set(filter_edges(self.graph, and_edge_predicates([])))
        self.assertEqual({(1, 2)}, edges)
def test_concatenate_single_edge_filter(self):
edges = make_edge_iterator_set(filter_edges(self.graph, [true_edge_predicate]))
self.assertEqual({(1, 2)}, edges)
def test_concatenate_multiple_edge_filter(self):
def has_odd_source(graph, u, v, k):
return u % 2 != 0
def has_even_target(graph, u, v, k):
return v % 2 == 0
edges = make_edge_iterator_set(filter_edges(self.universe, [has_odd_source, has_even_target]))
self.assertEqual({(1, 2), (1, 4), (5, 6)}, edges)
self.assertEqual(
3,
count_passed_edge_filter(self.universe, [has_odd_source, has_even_target]),
)
has_even_source = invert_edge_predicate(has_odd_source)
edges = make_edge_iterator_set(filter_edges(self.universe, has_even_source))
self.assertEqual({(2, 3), (8, 2)}, edges)
class TestEdgeFilters(unittest.TestCase):
def test_a(self):
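        # The "any" filter passes an edge if, for at least one queried
        # annotation key, at least one queried value is present on the edge.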
self.assertTrue(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1"}}))
self.assertTrue(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1", "2"}}))
self.assertTrue(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1", "2", "3"}}))
self.assertTrue(
_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}}, {"A": {"3"}, "B": {"X"}})
)
self.assertFalse(_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"3"}}))
self.assertFalse(
_annotation_dict_any_filter({ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}}, {"A": {"3"}, "B": {"Y"}})
)
def test_any_filter_no_query(self):
"""Test that the all filter returns true when there's no argument"""
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(1, count_passed_edge_filter(graph, build_annotation_dict_any_filter({})))
def test_any_filter_no_annotations(self):
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_any_filter({"A": {"1"}})),
)
def test_any_filter_empty_annotations(self):
graph = BELGraph()
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={},
)
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_any_filter({"A": {"1"}})),
)
def test_any_filter(self):
graph = BELGraph()
graph.annotation_list["A"] = set("12345")
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={"A": {"1", "2", "3"}},
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_any_filter(graph._clean_annotations({"A": {"1"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_any_filter(graph._clean_annotations({"A": {"1", "2"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_any_filter(graph._clean_annotations({"A": {"1", "2", "3"}})),
),
)
def test_b(self):
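        # The "all" filter passes an edge only if every queried value of every
        # queried key is present in the edge's annotations.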
self.assertTrue(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"A": {"1"}}))
self.assertTrue(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1", "2"}}))
self.assertTrue(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1", "2"}}}, {"A": {"1", "2"}}))
self.assertTrue(
_annotation_dict_all_filter(
{ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}},
{"A": {"1", "2"}, "B": {"X"}},
)
)
self.assertFalse(
_annotation_dict_all_filter(
{ANNOTATIONS: {"A": {"1", "2"}, "B": {"X"}}},
{"A": {"1", "2", "3"}, "B": {"X", "Y"}},
)
)
self.assertFalse(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"A": {"1", "2"}}))
self.assertFalse(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"A": {"2"}}))
self.assertFalse(_annotation_dict_all_filter({ANNOTATIONS: {"A": {"1"}}}, {"B": {"1"}}))
def test_all_filter_no_query(self):
"""Test that the all filter returns true when there's no argument"""
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(1, count_passed_edge_filter(graph, build_annotation_dict_all_filter({})))
def test_all_filter_no_annotations(self):
graph = BELGraph()
graph.add_increases(Protein(n(), n()), Protein(n(), n()), citation=n(), evidence=n())
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_all_filter({"A": {"1"}})),
)
def test_all_filter_empty_annotations(self):
graph = BELGraph()
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={},
)
self.assertEqual(
0,
count_passed_edge_filter(graph, build_annotation_dict_all_filter({"A": {"1"}})),
)
def test_all_filter(self):
graph = BELGraph()
graph.annotation_list["A"] = set("12345")
graph.add_increases(
Protein(n(), n()),
Protein(n(), n()),
citation=n(),
evidence=n(),
annotations={
"A": {"1", "2", "3"},
},
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1", "2"}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1", "2", "3"}})),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1", "2", "3", "4"}})),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"4"}})),
),
)
def test_all_filter_dict(self):
graph = BELGraph()
graph.annotation_list["A"] = set("12345")
a, b = Protein(namespace="hgnc", identifier="1", name="A"), Protein(namespace="hgnc", identifier="2", name="B")
graph.add_increases(
a,
b,
citation=n(),
evidence=n(),
annotations={
"A": {"1", "2", "3"},
},
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1": True}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1": True, "2": True}})),
),
)
self.assertEqual(
1,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"1": True, "2": True, "3": True}})),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(
graph._clean_annotations({"A": {"1": True, "2": True, "3": True, "4": True}})
),
),
)
self.assertEqual(
0,
count_passed_edge_filter(
graph,
build_annotation_dict_all_filter(graph._clean_annotations({"A": {"4": True}})),
),
)
|
|
#!/usr/bin/env python
"""
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue
import axis_ep
import ll_ep
module = 'll_axis_bridge'
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
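# The factory below compiles the RTL and testbench sources with Icarus Verilog
# into a vvp image and runs it as a MyHDL co-simulation, binding the supplied
# signals to the ports of the Verilog testbench.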
def dut_ll_axis_bridge(clk,
rst,
current_test,
ll_data_in,
ll_sof_in_n,
ll_eof_in_n,
ll_src_rdy_in_n,
ll_dst_rdy_out_n,
axis_tdata,
axis_tvalid,
axis_tready,
axis_tlast):
os.system(build_cmd)
return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module,
clk=clk,
rst=rst,
current_test=current_test,
ll_data_in=ll_data_in,
ll_sof_in_n=ll_sof_in_n,
ll_eof_in_n=ll_eof_in_n,
ll_src_rdy_in_n=ll_src_rdy_in_n,
ll_dst_rdy_out_n=ll_dst_rdy_out_n,
axis_tdata=axis_tdata,
axis_tvalid=axis_tvalid,
axis_tready=axis_tready,
axis_tlast=axis_tlast)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
ll_data_in = Signal(intbv(0)[8:])
ll_sof_in_n = Signal(bool(1))
ll_eof_in_n = Signal(bool(1))
ll_src_rdy_in_n = Signal(bool(1))
axis_tready = Signal(bool(0))
# Outputs
axis_tdata = Signal(intbv(0)[8:])
axis_tvalid = Signal(bool(0))
axis_tlast = Signal(bool(0))
ll_dst_rdy_out_n = Signal(bool(1))
# sources and sinks
source_queue = Queue()
source_pause = Signal(bool(0))
sink_queue = Queue()
sink_pause = Signal(bool(0))
source = ll_ep.LocalLinkSource(clk,
rst,
data_out=ll_data_in,
sof_out_n=ll_sof_in_n,
eof_out_n=ll_eof_in_n,
src_rdy_out_n=ll_src_rdy_in_n,
dst_rdy_in_n=ll_dst_rdy_out_n,
fifo=source_queue,
pause=source_pause,
name='source')
sink = axis_ep.AXIStreamSink(clk,
rst,
tdata=axis_tdata,
tvalid=axis_tvalid,
tready=axis_tready,
tlast=axis_tlast,
fifo=sink_queue,
pause=sink_pause,
name='sink')
# DUT
dut = dut_ll_axis_bridge(clk,
rst,
current_test,
ll_data_in,
ll_sof_in_n,
ll_eof_in_n,
ll_src_rdy_in_n,
ll_dst_rdy_out_n,
axis_tdata,
axis_tvalid,
axis_tready,
axis_tlast)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
yield clk.posedge
print("test 1: test packet")
current_test.next = 1
source_queue.put(bytearray(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10'))
yield clk.posedge
yield axis_tlast.negedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert bytearray(rx_frame) == (b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
yield delay(100)
yield clk.posedge
print("test 2: test packet with pauses")
current_test.next = 2
source_queue.put(bytearray(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10'))
yield clk.posedge
yield delay(64)
yield clk.posedge
source_pause.next = True
yield delay(32)
yield clk.posedge
source_pause.next = False
yield delay(64)
yield clk.posedge
sink_pause.next = True
yield delay(32)
yield clk.posedge
sink_pause.next = False
yield axis_tlast.negedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert bytearray(rx_frame) == (b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
yield delay(100)
raise StopSimulation
return dut, source, sink, clkgen, check
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
# Copyright (c) 2014, Menno Smits
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
from __future__ import unicode_literals
import itertools
import socket
import sys
from datetime import datetime
from mock import patch, sentinel, Mock
from imapclient import six
from imapclient.fixed_offset import FixedOffset
from .testable_imapclient import TestableIMAPClient as IMAPClient
from .imapclient_test import IMAPClientTest
class TestListFolders(IMAPClientTest):
def test_list_folders(self):
self.client._imap._simple_command.return_value = ('OK', [b'something'])
self.client._imap._untagged_response.return_value = ('LIST', sentinel.folder_data)
self.client._proc_folder_list = Mock(return_value=sentinel.folder_list)
folders = self.client.list_folders('foo', 'bar')
self.client._imap._simple_command.assert_called_once_with(
'LIST', b'"foo"', b'"bar"')
self.assertEqual(self.client._proc_folder_list.call_args, ((sentinel.folder_data,), {}))
self.assertTrue(folders is sentinel.folder_list)
def test_list_sub_folders(self):
self.client._imap._simple_command.return_value = ('OK', [b'something'])
self.client._imap._untagged_response.return_value = ('LSUB', sentinel.folder_data)
self.client._proc_folder_list = Mock(return_value=sentinel.folder_list)
folders = self.client.list_sub_folders('foo', 'bar')
self.client._imap._simple_command.assert_called_once_with(
'LSUB', b'"foo"', b'"bar"')
self.assertEqual(self.client._proc_folder_list.call_args, ((sentinel.folder_data,), {}))
self.assertTrue(folders is sentinel.folder_list)
def test_list_folders_NO(self):
self.client._imap._simple_command.return_value = ('NO', [b'badness'])
self.assertRaises(IMAPClient.Error, self.client.list_folders)
def test_list_sub_folders_NO(self):
self.client._imap._simple_command.return_value = ('NO', [b'badness'])
self.assertRaises(IMAPClient.Error, self.client.list_folders)
def test_utf7_decoding(self):
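        # "&AP8-" is IMAP modified UTF-7 (RFC 3501) for U+00FF, so with folder
        # name decoding enabled the folder is returned as 'Hello\xffworld'.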
self.client._imap._simple_command.return_value = ('OK', [b'something'])
self.client._imap._untagged_response.return_value = (
b'LIST', [
b'(\\HasNoChildren) "/" "A"',
b'(\\HasNoChildren) "/" "Hello&AP8-world"',
])
folders = self.client.list_folders('foo', 'bar')
self.client._imap._simple_command.assert_called_once_with('LIST', b'"foo"', b'"bar"')
self.assertEqual(folders, [((b'\\HasNoChildren',), b'/', 'A'),
((b'\\HasNoChildren',), b'/', 'Hello\xffworld')])
def test_folder_encode_off(self):
self.client.folder_encode = False
self.client._imap._simple_command.return_value = ('OK', [b'something'])
self.client._imap._untagged_response.return_value = (
b'LIST', [
b'(\\HasNoChildren) "/" "A"',
b'(\\HasNoChildren) "/" "Hello&AP8-world"',
])
folders = self.client.list_folders('foo', 'bar')
self.client._imap._simple_command.assert_called_once_with('LIST', '"foo"', '"bar"')
self.assertEqual(folders, [((b'\\HasNoChildren',), b'/', b'A'),
((b'\\HasNoChildren',), b'/', b'Hello&AP8-world')])
def test_simple(self):
folders = self.client._proc_folder_list([b'(\\HasNoChildren) "/" "A"',
b'(\\HasNoChildren) "/" "Foo Bar"',
])
self.assertEqual(folders, [((b'\\HasNoChildren',), b'/', 'A',),
((b'\\HasNoChildren',), b'/', 'Foo Bar')])
def test_without_quotes(self):
folders = self.client._proc_folder_list([b'(\\HasNoChildren) "/" A',
b'(\\HasNoChildren) "/" B',
b'(\\HasNoChildren) "/" C',
])
self.assertEqual(folders, [((b'\\HasNoChildren',), b'/', 'A'),
((b'\\HasNoChildren',), b'/', 'B'),
((b'\\HasNoChildren',), b'/', 'C')])
def test_unquoted_numeric_folder_name(self):
# Some IMAP implementations do this
folders = self.client._proc_folder_list([b'(\\HasNoChildren) "/" 123'])
self.assertEqual(folders, [((b'\\HasNoChildren',), b'/', '123')])
def test_mixed(self):
folders = self.client._proc_folder_list([b'(\\HasNoChildren) "/" Alpha',
b'(\\HasNoChildren) "/" "Foo Bar"',
b'(\\HasNoChildren) "/" C',
])
self.assertEqual(folders, [((b'\\HasNoChildren',), b'/', 'Alpha'),
((b'\\HasNoChildren',), b'/', 'Foo Bar'),
((b'\\HasNoChildren',), b'/', 'C')])
def test_funky_characters(self):
folders = self.client._proc_folder_list([(b'(\\NoInferiors \\UnMarked) "/" {5}', 'bang\xff'),
b'',
b'(\\HasNoChildren \\UnMarked) "/" "INBOX"'])
self.assertEqual(folders, [((b'\\NoInferiors', b'\\UnMarked'), b"/", 'bang\xff'),
((b'\\HasNoChildren', b'\\UnMarked'), b"/", 'INBOX')])
def test_quoted_specials(self):
folders = self.client._proc_folder_list([br'(\HasNoChildren) "/" "Test \"Folder\""',
br'(\HasNoChildren) "/" "Left\"Right"',
br'(\HasNoChildren) "/" "Left\\Right"',
br'(\HasNoChildren) "/" "\"Left Right\""',
br'(\HasNoChildren) "/" "\"Left\\Right\""',
])
self.assertEqual(folders, [((b'\\HasNoChildren',), b'/', 'Test "Folder"'),
((b'\\HasNoChildren',), b'/', 'Left\"Right'),
((b'\\HasNoChildren',), b'/', r'Left\Right'),
((b'\\HasNoChildren',), b'/', r'"Left Right"'),
((b'\\HasNoChildren',), b'/', r'"Left\Right"'),
])
def test_empty_response(self):
self.assertEqual(self.client._proc_folder_list([None]), [])
def test_blanks(self):
folders = self.client._proc_folder_list(['', None, br'(\HasNoChildren) "/" "last"'])
self.assertEqual(folders, [((br'\HasNoChildren',), b'/', 'last')])
class TestSelectFolder(IMAPClientTest):
def test_normal(self):
self.client._command_and_check = Mock()
self.client._imap.untagged_responses = {
b'exists': [b'3'],
b'FLAGS': [br"(\Flagged \Deleted abc [foo]/bar def)"],
b'HIGHESTMODSEQ': [b'127110'],
b'OK': [br"[PERMANENTFLAGS (\Flagged \Deleted abc [foo]/bar def \*)] Flags permitted.",
b'[UIDVALIDITY 631062293] UIDs valid.',
b'[UIDNEXT 1281] Predicted next UID.',
b'[HIGHESTMODSEQ 127110]'],
b'PERMANENTFLAGS': [br'(\Flagged \Deleted abc [foo'],
b'READ-WRITE': [b''],
b'RECENT': [b'0'],
b'UIDNEXT': [b'1281'],
b'UIDVALIDITY': [b'631062293'],
b'OTHER': [b'blah']
}
result = self.client.select_folder(b'folder_name', sentinel.readonly)
self.client._command_and_check.assert_called_once_with('select',
b'"folder_name"',
sentinel.readonly)
self.maxDiff = 99999
self.assertEqual(result, {
b'EXISTS': 3,
b'RECENT': 0,
b'UIDNEXT': 1281,
b'UIDVALIDITY': 631062293,
b'HIGHESTMODSEQ': 127110,
b'FLAGS': (br'\Flagged', br'\Deleted', b'abc', b'[foo]/bar', b'def'),
b'PERMANENTFLAGS': (br'\Flagged', br'\Deleted', b'abc', b'[foo]/bar', b'def', br'\*'),
b'READ-WRITE': True,
b'OTHER': [b'blah']
})
class TestAppend(IMAPClientTest):
def test_without_msg_time(self):
self.client._imap.append.return_value = ('OK', [b'Good'])
msg = 'hi'
self.client.append('foobar', msg, ['FLAG', 'WAVE'], None)
self.client._imap.append.assert_called_with(
b'"foobar"', '(FLAG WAVE)', None, b'hi')
@patch('imapclient.imapclient.datetime_to_imap')
def test_with_msg_time(self, datetime_to_imap):
datetime_to_imap.return_value = 'somedate'
self.client._imap.append.return_value = ('OK', [b'Good'])
msg = b'bye'
self.client.append('foobar', msg, ['FLAG', 'WAVE'],
datetime(2009, 4, 5, 11, 0, 5, 0, FixedOffset(2*60)))
self.assertTrue(datetime_to_imap.called)
self.client._imap.append.assert_called_with(
b'"foobar"', '(FLAG WAVE)', '"somedate"', msg)
class TestAclMethods(IMAPClientTest):
def test_getacl(self):
self.client._imap.getacl.return_value = ('OK', [b'INBOX Fred rwipslda Sally rwip'])
acl = self.client.getacl('INBOX')
self.assertSequenceEqual(acl, [(b'Fred', b'rwipslda'), (b'Sally', b'rwip')])
def test_setacl(self):
self.client._imap.setacl.return_value = ('OK', [b"SETACL done"])
response = self.client.setacl('folder', sentinel.who, sentinel.what)
self.client._imap.setacl.assert_called_with(b'"folder"',
sentinel.who,
sentinel.what)
self.assertEqual(response, b"SETACL done")
class TestIdleAndNoop(IMAPClientTest):
def test_idle(self):
self.client._imap._command.return_value = sentinel.tag
self.client._imap._get_response.return_value = None
self.client.idle()
self.client._imap._command.assert_called_with('IDLE')
self.assertEqual(self.client._idle_tag, sentinel.tag)
@patch('imapclient.imapclient.select.select')
def test_idle_check_blocking(self, mock_select):
mock_sock = Mock()
self.client._imap.sock = self.client._imap.sslobj = mock_sock
mock_select.return_value = ([True], [], [])
counter = itertools.count()
def fake_get_line():
count = six.next(counter)
if count == 0:
return b'* 1 EXISTS'
elif count == 1:
return b'* 0 EXPUNGE'
else:
raise socket.timeout
self.client._imap._get_line = fake_get_line
responses = self.client.idle_check()
mock_select.assert_called_once_with([mock_sock], [], [], None)
self.assertListEqual(mock_sock.method_calls,
[('setblocking', (0,), {}),
('setblocking', (1,), {})])
self.assertListEqual([(1, b'EXISTS'), (0, b'EXPUNGE')], responses)
@patch('imapclient.imapclient.select.select')
def test_idle_check_timeout(self, mock_select):
mock_sock = Mock()
self.client._imap.sock = self.client._imap.sslobj = mock_sock
mock_select.return_value = ([], [], [])
responses = self.client.idle_check(timeout=0.5)
mock_select.assert_called_once_with([mock_sock], [], [], 0.5)
self.assertListEqual(mock_sock.method_calls,
[('setblocking', (0,), {}),
('setblocking', (1,), {})])
self.assertListEqual([], responses)
@patch('imapclient.imapclient.select.select')
def test_idle_check_with_data(self, mock_select):
mock_sock = Mock()
self.client._imap.sock = self.client._imap.sslobj = mock_sock
mock_select.return_value = ([True], [], [])
counter = itertools.count()
def fake_get_line():
count = six.next(counter)
if count == 0:
return b'* 99 EXISTS'
else:
raise socket.timeout
self.client._imap._get_line = fake_get_line
responses = self.client.idle_check()
mock_select.assert_called_once_with([mock_sock], [], [], None)
self.assertListEqual(mock_sock.method_calls,
[('setblocking', (0,), {}),
('setblocking', (1,), {})])
self.assertListEqual([(99, b'EXISTS')], responses)
def test_idle_done(self):
self.client._idle_tag = sentinel.tag
mockSend = Mock()
self.client._imap.send = mockSend
mockConsume = Mock(return_value=sentinel.out)
self.client._consume_until_tagged_response = mockConsume
result = self.client.idle_done()
mockSend.assert_called_with(b'DONE\r\n')
mockConsume.assert_called_with(sentinel.tag, 'IDLE')
self.assertEqual(result, sentinel.out)
def test_noop(self):
mockCommand = Mock(return_value=sentinel.tag)
self.client._imap._command = mockCommand
mockConsume = Mock(return_value=sentinel.out)
self.client._consume_until_tagged_response = mockConsume
result = self.client.noop()
mockCommand.assert_called_with('NOOP')
mockConsume.assert_called_with(sentinel.tag, 'NOOP')
self.assertEqual(result, sentinel.out)
def test_consume_until_tagged_response(self):
client = self.client
client._imap.tagged_commands = {sentinel.tag: None}
counter = itertools.count()
def fake_get_response():
count = six.next(counter)
if count == 0:
return b'* 99 EXISTS'
client._imap.tagged_commands[sentinel.tag] = ('OK', [b'Idle done'])
client._imap._get_response = fake_get_response
text, responses = client._consume_until_tagged_response(sentinel.tag, b'IDLE')
self.assertEqual(client._imap.tagged_commands, {})
self.assertEqual(text, b'Idle done')
self.assertListEqual([(99, b'EXISTS')], responses)
class TestDebugLogging(IMAPClientTest):
def test_default_is_stderr(self):
self.assertIs(self.client.log_file, sys.stderr)
def test_IMAP_is_patched(self):
log = six.StringIO()
self.client.log_file = log
self.client._log('one')
self.client._imap._mesg('two')
output = log.getvalue()
self.assertIn('one', output)
self.assertIn('two', output)
class TestTimeNormalisation(IMAPClientTest):
def test_default(self):
self.assertTrue(self.client.normalise_times)
@patch('imapclient.imapclient.parse_fetch_response')
def test_pass_through(self, parse_fetch_response):
self.client._imap._command_complete.return_value = ('OK', sentinel.data)
self.client._imap._untagged_response.return_value = ('OK', sentinel.fetch_data)
self.client.use_uid = sentinel.use_uid
def check(expected):
self.client.fetch(22, ['SOMETHING'])
parse_fetch_response.assert_called_with(sentinel.fetch_data,
expected,
sentinel.use_uid)
self.client.normalise_times = True
check(True)
self.client.normalise_times = False
check(False)
class TestGmailLabels(IMAPClientTest):
def setUp(self):
super(TestGmailLabels, self).setUp()
patcher = patch.object(self.client, '_store', autospec=True, return_value=sentinel.label_set)
patcher.start()
self.addCleanup(patcher.stop)
def test_get(self):
with patch.object(self.client, 'fetch', autospec=True,
return_value={123: {b'X-GM-LABELS': [b'foo', b'bar']},
444: {b'X-GM-LABELS': [b'foo']}}):
out = self.client.get_gmail_labels(sentinel.messages)
self.client.fetch.assert_called_with(sentinel.messages, [b'X-GM-LABELS'])
self.assertEqual(out, {123: [b'foo', b'bar'],
444: [b'foo']})
def test_add(self):
self.client.add_gmail_labels(sentinel.messages, sentinel.labels)
self.client._store.assert_called_with(b'+X-GM-LABELS', sentinel.messages, sentinel.labels, b'X-GM-LABELS')
def test_remove(self):
self.client.remove_gmail_labels(sentinel.messages, sentinel.labels)
self.client._store.assert_called_with(b'-X-GM-LABELS', sentinel.messages, sentinel.labels, b'X-GM-LABELS')
def test_set(self):
self.client.set_gmail_labels(sentinel.messages, sentinel.labels)
self.client._store.assert_called_with(b'X-GM-LABELS', sentinel.messages, sentinel.labels, b'X-GM-LABELS')
class TestNamespace(IMAPClientTest):
def set_return(self, value):
self.client._imap.namespace.return_value = ('OK', [value])
def test_simple(self):
self.set_return(b'(("FOO." "/")) NIL NIL')
self.assertEqual(self.client.namespace(), ((('FOO.', '/'),), None, None))
def test_folder_decoding(self):
self.set_return(b'(("&AP8-." "/")) NIL NIL')
self.assertEqual(self.client.namespace(), ((('\xff.', '/'),), None, None))
def test_without_folder_decoding(self):
self.set_return(b'(("&AP8-." "/")) NIL NIL')
self.client.folder_encode = False
self.assertEqual(self.client.namespace(), (((b'&AP8-.', '/'),), None, None))
def test_other_only(self):
self.set_return(b'NIL NIL (("" "."))')
self.assertEqual(self.client.namespace(), (None, None, (("", "."),)))
def test_complex(self):
self.set_return(b'(("" "/")) '
b'(("~" "/")) '
b'(("#shared/" "/") ("#public/" "/")("#ftp/" "/")("#news." "."))')
self.assertEqual(self.client.namespace(), (
(("", "/"),),
(("~", "/"),),
(("#shared/", "/"), ("#public/", "/"), ("#ftp/", "/"), ("#news.", ".")),
))
class TestCapabilities(IMAPClientTest):
def test_preauth(self):
self.client._imap.capabilities = ('FOO', 'BAR')
self.client._imap.untagged_responses = {}
self.assertEqual(self.client.capabilities(), (b'FOO', b'BAR'))
def test_server_returned_capability_after_auth(self):
self.client._imap.capabilities = (b'FOO',)
self.client._imap.untagged_responses = {'CAPABILITY': [b'FOO MORE']}
self.assertEqual(self.client._cached_capabilities, None)
self.assertEqual(self.client.capabilities(), (b'FOO', b'MORE'))
self.assertEqual(self.client._cached_capabilities, (b'FOO', b'MORE'))
def test_caching(self):
self.client._imap.capabilities = ('FOO',)
self.client._imap.untagged_responses = {}
self.client._cached_capabilities = (b'FOO', b'MORE')
self.assertEqual(self.client.capabilities(), (b'FOO', b'MORE'))
def test_post_auth_request(self):
self.client._imap.capabilities = ('FOO',)
self.client._imap.untagged_responses = {}
self.client._imap.state = 'SELECTED'
self.client._imap.capability.return_value = ('OK', [b'FOO BAR'])
self.assertEqual(self.client.capabilities(), (b'FOO', b'BAR'))
self.assertEqual(self.client._cached_capabilities, (b'FOO', b'BAR'))
def test_has_capability(self):
self.client._cached_capabilities = (b'FOO', b'MORE')
self.assertTrue(self.client.has_capability(b'FOO'))
self.assertTrue(self.client.has_capability(b'foo'))
self.assertFalse(self.client.has_capability(b'BAR'))
self.assertTrue(self.client.has_capability('FOO'))
self.assertTrue(self.client.has_capability('foo'))
self.assertFalse(self.client.has_capability('BAR'))
class TestThread(IMAPClientTest):
def test_thread_without_uid(self):
self.client._cached_capabilities = (b'THREAD=REFERENCES',)
self.client.use_uid = False
self.client._imap.thread.return_value = ('OK', [b'(1 2)(3)(4 5 6)'])
threads = self.client.thread()
self.assertSequenceEqual(threads, ((1, 2), (3,), (4, 5, 6)))
def test_thread_with_uid(self):
self.client._cached_capabilities = (b'THREAD=REFERENCES',)
self.client.use_uid = True
self.client._imap.uid.return_value = ('OK', [b'(1 2)(3)(4 5 6)'])
threads = self.client.thread()
self.assertSequenceEqual(threads, ((1, 2), (3,), (4, 5, 6)))
def test_no_support(self):
self.client._cached_capabilities = (b'NOT-THREAD',)
self.assertRaises(ValueError, self.client.thread)
def test_no_support2(self):
self.client._cached_capabilities = (b'THREAD=FOO',)
self.assertRaises(ValueError, self.client.thread)
def test_all_args_with_uid(self):
self.client._cached_capabilities = (b'THREAD=FOO',)
self.client._imap.uid.return_value = ('OK', [])
self.client.thread(algorithm='FOO', criteria='STUFF', charset='ASCII')
self.client._imap.uid.assert_called_once_with('thread', b'FOO', b'ASCII', '(STUFF)')
def test_all_args_without_uid(self):
self.client.use_uid = False
self.client._cached_capabilities = (b'THREAD=FOO',)
self.client._imap.thread.return_value = ('OK', [])
self.client.thread(algorithm='FOO', criteria='STUFF', charset='ASCII')
self.client._imap.thread.assert_called_once_with(b'FOO', b'ASCII', '(STUFF)')
|
|
import unittest
import time
from AerospikeClientMock import AerospikeClientMock
class TestAerospikeClientTtlMock(unittest.TestCase):
def setUp(self):
pass
def get_time(self, ttl=0):
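        # Expected absolute expiry timestamp (seconds since the epoch plus the
        # given ttl) that the mock reports in record metadata.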
return int(time.time()) + ttl
def test_blank_init(self):
asm = AerospikeClientMock(default_ttl=2)
self.assertEqual({}, asm.dump())
def test_connected(self):
asm = AerospikeClientMock(default_ttl=2)
self.assertTrue(asm.is_connected())
def test_put(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"a": 1})
self.assertEqual({('a', 'b', 'c'): {'a': 1}}, asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
time.sleep(1)
asm.put(key, {"a": 1})
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 2, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
asm.put(key, {"a": 1}, meta={"ttl": 1})
self.assertEqual(
(('a', 'b', 'c'), {'gen': 3, 'ttl': self.get_time(1)}, {'a': 1}),
asm.get(key))
def test_incr(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"a": 1})
asm.increment(key, "a", 2)
self.assertEqual({('a', 'b', 'c'): {'a': 3}}, asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 2, 'ttl': self.get_time(default_ttl)},
{'a': 3}
), asm.get(key))
asm.increment(key, "a", 1, meta={"ttl": 1})
self.assertEqual(
(('a', 'b', 'c'), {'gen': 3, 'ttl': self.get_time(1)}, {'a': 4}),
asm.get(key))
def test_undefined_incr(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.increment(key, "a", 1)
self.assertEqual({('a', 'b', 'c'): {'a': 1}}, asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
    def test_prepend(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"word": "ello"})
asm.prepend(key, "word", "h")
self.assertEqual({('a', 'b', 'c'): {'word': 'hello'}}, asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 2, 'ttl': self.get_time(default_ttl)},
{'word': 'hello'}
), asm.get(key))
    def test_append(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"word": "hell"})
asm.append(key, "word", "o")
self.assertEqual({('a', 'b', 'c'): {'word': 'hello'}}, asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 2, 'ttl': self.get_time(default_ttl)},
{'word': 'hello'}
), asm.get(key))
def test_get(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"a": 1})
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
        # check that a plain get() does not change the generation counter
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
def test_exists(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"a": 1})
        self.assertEqual((True, {'gen': 1, 'ttl': self.get_time(default_ttl)}),
                         asm.exists(key))
        # check that exists() does not change the generation counter
        self.assertEqual((True, {'gen': 1, 'ttl': self.get_time(default_ttl)}),
                         asm.exists(key))
def test_expire_exist(self):
asm = AerospikeClientMock(default_ttl=1)
key = ("a", "b", "c")
asm.put(key, {"a": 1})
time.sleep(2)
        self.assertEqual((False, None), asm.exists(key))
def test_not_exists(self):
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
        self.assertEqual((False, None), asm.exists(key))
def test_touch(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"a": 1})
        self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
time.sleep(1)
asm.touch(key)
        self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 2, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
asm.touch(key, 4)
        self.assertEqual(
(('a', 'b', 'c'), {'gen': 3, 'ttl': self.get_time(4)}, {'a': 1}),
asm.get(key))
def test_remove_bin(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
key = ("a", "b", "c")
asm.put(key, {"a": 1, "b": 1, "c": 1, "d": 1})
        self.assertEqual({('a', 'b', 'c'): {'a': 1, 'c': 1, 'b': 1, 'd': 1}},
                         asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1, 'c': 1, 'b': 1, 'd': 1}
), asm.get(key))
asm.remove_bin(key, ["b", "d"], meta={"ttl": 4})
        self.assertEqual({('a', 'b', 'c'): {'a': 1, 'c': 1}}, asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 2, 'ttl': self.get_time(4)},
{'a': 1, 'c': 1}
), asm.get(key))
asm.remove_bin(key, ["c"])
        self.assertEqual({('a', 'b', 'c'): {'a': 1}}, asm.dump())
self.assertEqual(
(
('a', 'b', 'c'),
{'gen': 3, 'ttl': self.get_time(default_ttl)},
{'a': 1}
), asm.get(key))
def test_get_many(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
asm.put(("a", "b", 1), {"a": 1})
asm.put(("a", "b", 2), {"a": 2})
asm.put(("a", "b", 3), {"a": 3})
asm.put(("a", "b", 4), {"a": 4})
keys = [
("a", "b", 1),
("a", "b", 2),
("a", "b", 3),
("a", "b", 4),
("a", "b", 5),
]
self.assertEqual(
[
(
('a', 'b', 1),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1}
),
(
('a', 'b', 2),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 2}
), (
('a', 'b', 3),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 3}
),
(
('a', 'b', 4),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 4}
),
(
('a', 'b', 5),
None,
None
),
]
, asm.get_many(keys))
def test_exists_many(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
asm.put(("a", "b", 1), {"a": 1})
asm.put(("a", "b", 2), {"a": 2})
asm.put(("a", "b", 3), {"a": 3})
asm.put(("a", "b", 4), {"a": 4})
keys = [
("a", "b", 1),
("a", "b", 2),
("a", "b", 3),
("a", "b", 4),
("a", "b", 5),
]
self.assertEqual(
[
(('a', 'b', 1), {'gen': 1, 'ttl': self.get_time(default_ttl)}),
(('a', 'b', 2), {'gen': 1, 'ttl': self.get_time(default_ttl)}),
(('a', 'b', 3), {'gen': 1, 'ttl': self.get_time(default_ttl)}),
(('a', 'b', 4), {'gen': 1, 'ttl': self.get_time(default_ttl)}),
(('a', 'b', 5), None)
]
, asm.exists_many(keys))
def test_select_many(self):
default_ttl = 2
asm = AerospikeClientMock(default_ttl=2)
asm.put(("a", "b", 1), {"a": 1, "b": 1})
asm.put(("a", "b", 2), {"a": 2, "b": 2})
asm.put(("a", "b", 3), {"a": 3, "b": 3})
asm.put(("a", "b", 4), {"a": 4, "b": 4})
keys = [
("a", "b", 1),
("a", "b", 2),
("a", "b", 3),
("a", "b", 4),
("a", "b", 5),
]
self.assertEqual(
{
('a', 'b', 3): {'a': 3, 'b': 3},
('a', 'b', 2): {'a': 2, 'b': 2},
('a', 'b', 4): {'a': 4, 'b': 4},
('a', 'b', 1): {'a': 1, 'b': 1},
},
asm.dump())
self.assertEqual(
[
(('a', 'b', 1),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 1, 'b': 1}),
(('a', 'b', 2),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 2, 'b': 2}),
(('a', 'b', 3),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 3, 'b': 3}),
(('a', 'b', 4),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'a': 4, 'b': 4}),
None,
]
, asm.select_many(keys, ["a", "b"]))
self.assertEqual(
[
(
('a', 'b', 1),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'b': 1}
),
(
('a', 'b', 2),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'b': 2}
),
(
('a', 'b', 3),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'b': 3}
),
(
('a', 'b', 4),
{'gen': 1, 'ttl': self.get_time(default_ttl)},
{'b': 4}
),
None,
]
, asm.select_many(keys, ["b"]))
if __name__ == '__main__':
unittest.main()
|
|
"""
Family of two-dimensional functions indexed by x and y.
All functions are written to be valid both for scalar x and y, and for
numpy arrays of x and y (in which case the result is also an array);
the functions therefore have the same mathematical behaviour as numpy.
$Id$
"""
from __future__ import with_statement
__version__='$Revision$'
from math import pi
from numpy.oldnumeric import where,maximum,cos,sqrt,divide,greater_equal,bitwise_xor,exp
from numpy.oldnumeric import arcsin,logical_and,logical_or,less,minimum
from numpy import seterr, log
from contextlib import contextmanager
# CEBALERT: abs() is used in various places in this file, but I don't
# see it on the list of numpy imports. I guess we're mistakenly not
# using numpy's abs...
@contextmanager
def float_error_ignore():
"""
Many of the functions in this module use Gaussian smoothing, which
is based on a calculation like exp(divide(x*x,sigma)). When sigma
is zero the value of this expression should be zero at all points
in the plane, because such a Gaussian is infinitely small.
Obtaining the correct answer using finite-precision floating-point
array computations requires allowing infinite values to be
returned from divide(), and allowing exp() to underflow silently
to zero when given an infinite value. In numpy this is achieved
by using its seterr() function to disable divide-by-zero and
underflow warnings temporarily while these values are being
computed.
"""
oldsettings=seterr(divide='ignore',under='ignore')
yield
seterr(**oldsettings)
def gaussian(x, y, xsigma, ysigma):
"""
Two-dimensional oriented Gaussian pattern (i.e., 2D version of a
bell curve, like a normal distribution but not necessarily summing
to 1.0).
"""
if xsigma==0.0 or ysigma==0.0:
return x*0.0
with float_error_ignore():
x_w = divide(x,xsigma)
y_h = divide(y,ysigma)
return exp(-0.5*x_w*x_w + -0.5*y_h*y_h)
def log_gaussian(x, y, x_sigma, y_sigma, mu):
"""
Two-dimensional oriented Log Gaussian pattern (i.e., 2D version of a
bell curve with an independent, movable peak). Much like a normal
distribution, but not necessarily placing the peak above the center,
    and not necessarily summing to 1.0.
"""
if x_sigma==0.0 or y_sigma==0.0:
return x * 0.0
with float_error_ignore():
x_w = divide(log(x)-mu, x_sigma*x_sigma)
y_h = divide(log(y)-mu, y_sigma*y_sigma)
return exp(-0.5*x_w*x_w + -0.5*y_h*y_h)
def sigmoid(axis, slope):
"""
Sigmoid dividing axis into a positive and negative half,
with a smoothly sloping transition between them (controlled by the slope).
At default rotation, axis refers to the vertical (y) axis.
"""
with float_error_ignore():
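        # 2.0/(1.0 + exp(-2.0*slope*axis)) - 1.0 is mathematically identical
        # to tanh(slope*axis).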
return (2.0 / (1.0 + exp(-2.0*slope*axis))) - 1.0
def exponential(x, y, xscale, yscale):
"""
Two-dimensional oriented exponential decay pattern.
"""
if xscale==0.0 or yscale==0.0:
return x*0.0
with float_error_ignore():
x_w = divide(x,xscale)
y_h = divide(y,yscale)
return exp(-sqrt(x_w*x_w+y_h*y_h))
def gabor(x, y, xsigma, ysigma, frequency, phase):
"""
Gabor pattern (sine grating multiplied by a circular Gaussian).
"""
if xsigma==0.0 or ysigma==0.0:
return x*0.0
with float_error_ignore():
x_w = divide(x,xsigma)
y_h = divide(y,ysigma)
p = exp(-0.5*x_w*x_w + -0.5*y_h*y_h)
return p * 0.5*cos(2*pi*frequency*y + phase)
# JABHACKALERT: Shouldn't this use 'size' instead of 'thickness',
# for consistency with the other patterns? Right now, it has a
# size parameter and ignores it, which is very confusing. I guess
# it's called thickness to match ring, but matching gaussian and disk
# is probably more important.
def line(y, thickness, gaussian_width):
"""
Infinite-length line with a solid central region, then Gaussian fall-off at the edges.
"""
distance_from_line = abs(y)
gaussian_y_coord = distance_from_line - thickness/2.0
sigmasq = gaussian_width*gaussian_width
if sigmasq==0.0:
falloff = y*0.0
else:
with float_error_ignore():
falloff = exp(divide(-gaussian_y_coord*gaussian_y_coord,2*sigmasq))
return where(gaussian_y_coord<=0, 1.0, falloff)
def disk(x, y, height, gaussian_width):
"""
Circular disk with Gaussian fall-off after the solid central region.
"""
disk_radius = height/2.0
distance_from_origin = sqrt(x**2+y**2)
distance_outside_disk = distance_from_origin - disk_radius
sigmasq = gaussian_width*gaussian_width
if sigmasq==0.0:
falloff = x*0.0
else:
with float_error_ignore():
falloff = exp(divide(-distance_outside_disk*distance_outside_disk,
2*sigmasq))
return where(distance_outside_disk<=0,1.0,falloff)
def ring(x, y, height, thickness, gaussian_width):
"""
Circular ring (annulus) with Gaussian fall-off after the solid ring-shaped region.
"""
radius = height/2.0
half_thickness = thickness/2.0
distance_from_origin = sqrt(x**2+y**2)
distance_outside_outer_disk = distance_from_origin - radius - half_thickness
distance_inside_inner_disk = radius - half_thickness - distance_from_origin
ring = 1.0-bitwise_xor(greater_equal(distance_inside_inner_disk,0.0),greater_equal(distance_outside_outer_disk,0.0))
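    # ring is 1.0 exactly on the solid annulus (neither inside the inner disk
    # nor outside the outer disk) and 0.0 elsewhere.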
sigmasq = gaussian_width*gaussian_width
if sigmasq==0.0:
inner_falloff = x*0.0
outer_falloff = x*0.0
else:
with float_error_ignore():
inner_falloff = exp(divide(-distance_inside_inner_disk*distance_inside_inner_disk, 2.0*sigmasq))
outer_falloff = exp(divide(-distance_outside_outer_disk*distance_outside_outer_disk, 2.0*sigmasq))
return maximum(inner_falloff,maximum(outer_falloff,ring))
def smooth_rectangle(x, y, rec_w, rec_h, gaussian_width_x, gaussian_width_y):
"""
Rectangle with a solid central region, then Gaussian fall-off at the edges.
"""
gaussian_x_coord = abs(x)-rec_w/2.0
gaussian_y_coord = abs(y)-rec_h/2.0
box_x=less(gaussian_x_coord,0.0)
box_y=less(gaussian_y_coord,0.0)
sigmasq_x=gaussian_width_x*gaussian_width_x
sigmasq_y=gaussian_width_y*gaussian_width_y
with float_error_ignore():
falloff_x=x*0.0 if sigmasq_x==0.0 else \
exp(divide(-gaussian_x_coord*gaussian_x_coord,2*sigmasq_x))
falloff_y=y*0.0 if sigmasq_y==0.0 else \
exp(divide(-gaussian_y_coord*gaussian_y_coord,2*sigmasq_y))
return minimum(maximum(box_x,falloff_x), maximum(box_y,falloff_y))
def arc_by_radian(x, y, height, radian_range, thickness, gaussian_width):
"""
Radial arc with Gaussian fall-off after the solid ring-shaped
region with the given thickness, with shape specified by the
(start,end) radian_range.
"""
# Create a circular ring (copied from the ring function)
radius = height/2.0
half_thickness = thickness/2.0
distance_from_origin = sqrt(x**2+y**2)
distance_outside_outer_disk = distance_from_origin - radius - half_thickness
distance_inside_inner_disk = radius - half_thickness - distance_from_origin
ring = 1.0-bitwise_xor(greater_equal(distance_inside_inner_disk,0.0),greater_equal(distance_outside_outer_disk,0.0))
sigmasq = gaussian_width*gaussian_width
if sigmasq==0.0:
inner_falloff = x*0.0
outer_falloff = x*0.0
else:
with float_error_ignore():
inner_falloff = exp(divide(-distance_inside_inner_disk*distance_inside_inner_disk, 2.0*sigmasq))
outer_falloff = exp(divide(-distance_outside_outer_disk*distance_outside_outer_disk, 2.0*sigmasq))
output_ring = maximum(inner_falloff,maximum(outer_falloff,ring))
    # Calculate radians (in 4 phases) and cut according to the given range.
# RZHACKALERT:
# Function float_error_ignore() cannot catch the exception when
# both dividend and divisor are 0.0, and when only divisor is 0.0
# it returns 'Inf' rather than 0.0. In x, y and
# distance_from_origin, only one point in distance_from_origin can
# be 0.0 (circle center) and in this point x and y must be 0.0 as
# well. So here is a hack to avoid the 'invalid value encountered
# in divide' error by turning 0.0 to 1e-5 in distance_from_origin.
distance_from_origin += where(distance_from_origin == 0.0, 1e-5, 0)
with float_error_ignore():
sines = divide(y, distance_from_origin)
cosines = divide(x, distance_from_origin)
arcsines = arcsin(sines)
phase_1 = where(logical_and(sines >= 0, cosines >= 0), 2*pi-arcsines, 0)
phase_2 = where(logical_and(sines >= 0, cosines < 0), pi+arcsines, 0)
phase_3 = where(logical_and(sines < 0, cosines < 0), pi+arcsines, 0)
phase_4 = where(logical_and(sines < 0, cosines >= 0), -arcsines, 0)
arcsines = phase_1 + phase_2 + phase_3 + phase_4
if radian_range[0] <= radian_range[1]:
return where(logical_and(arcsines >= radian_range[0], arcsines <= radian_range[1]),
output_ring, 0.0)
else:
return where(logical_or(arcsines >= radian_range[0], arcsines <= radian_range[1]),
output_ring, 0.0)
def arc_by_center(x, y, arc_box, constant_length, thickness, gaussian_width):
"""
Arc with Gaussian fall-off after the solid ring-shaped region and specified
by point of tangency (x and y) and arc width and height.
This function calculates the start and end radian from the given width and
height, and then calls arc_by_radian function to draw the curve.
"""
arc_w=arc_box[0]
arc_h=abs(arc_box[1])
if arc_w==0.0: # arc_w=0, don't draw anything
radius=0.0
angles=(0.0,0.0)
elif arc_h==0.0: # draw a horizontal line, width=arc_w
return smooth_rectangle(x, y, arc_w, thickness, 0.0, gaussian_width)
else:
if constant_length:
curvature=arc_h/arc_w
radius=arc_w/(2*pi*curvature)
angle=curvature*(2*pi)/2.0
else: # constant width
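            # chord/sagitta relation: for chord length arc_w and sagitta
            # arc_h, the circumscribing radius is h/2 + w**2/(8*h)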
radius=arc_h/2.0+arc_w**2.0/(8*arc_h)
angle=arcsin(arc_w/2.0/radius)
if arc_box[1]<0: # convex shape
y=y+radius
angles=(3.0/2.0*pi-angle, 3.0/2.0*pi+angle)
else: # concave shape
y=y-radius
angles=(pi/2.0-angle, pi/2.0+angle)
return arc_by_radian(x, y, radius*2.0, angles, thickness, gaussian_width)
|
|
# -*- coding: utf-8 -*-
"""
Contains code that defines the behaviour of the local node in the DHT network.
"""
from .routingtable import RoutingTable
from .lookup import Lookup
from .storage import DictDataStore
from .contact import PeerNode
from .crypto import check_seal, get_seal, verify_item, construct_key
from .errors import (BadMessage, ExpiredMessage, OutOfDateMessage,
UnverifiableProvenance, TimedOut)
from .messages import (OK, Store, FindNode, Nodes, FindValue,
Value, from_dict, to_dict)
from .constants import (REPLICATE_INTERVAL, REFRESH_INTERVAL,
RESPONSE_TIMEOUT, K)
from ..version import get_version
import logging
import time
import asyncio
from hashlib import sha512
from uuid import uuid4
log = logging.getLogger(__name__)
class Node(object):
"""
This class represents a single local node in the DHT encapsulating its
presence in the network.
All interactions with the DHT network are performed via this class (or a
subclass).
"""
def __init__(self, public_key, private_key, event_loop, connector,
reply_port):
"""
Initialises the node with the credentials, event loop and object
via which the node opens connections to peers. The reply_port
argument tells other nodes on the network the port to use to contact
this node. Such a port may not be the port used by the local machine
but could be, for example, the port assigned by the UPnP setup of the
local router.
"""
self.public_key = public_key
self.private_key = private_key
self.event_loop = event_loop
self.connector = connector
self.reply_port = reply_port
# The node's ID within the distributed hash table.
self.network_id = sha512(public_key.encode('ascii')).hexdigest()
# Reference to the event loop.
self.event_loop = event_loop
# The routing table stores information about other nodes on the DHT.
self.routing_table = RoutingTable(self.network_id)
# The local key/value store containing data held by this node.
self.data_store = DictDataStore()
# A dictionary of IDs for messages pending a response and associated
# Future instances to be fired when a response is completed.
self.pending = {}
# The version of Drogulus that this node implements.
self.version = get_version()
log.info('Initialised node with id: {}'.format(self.network_id))
def join(self, data_dump):
"""
Causes the Node to join the DHT network. This should be called before
        any other DHT operations. The data_dump argument must contain a
        'contacts' list describing already known nodes on the network.
"""
if not data_dump.get('contacts', []):
raise ValueError('Cannot join network: no contacts supplied')
self.routing_table.restore(data_dump)
# Ensure the refresh of k-buckets is set up properly.
self.event_loop.call_later(REFRESH_INTERVAL, self.refresh)
# Looking up the node's ID on the network will populate the routing
# table with fresh nodes as well as tell us who our nearest neighbours
# are.
return Lookup(FindNode, self.network_id, self, self.event_loop)
def message_received(self, message, protocol, address, port):
"""
Handles incoming messages.
The protocol, address and port arguments are used to create the
remote contact's URI used to identify them on the network.
"""
# Check the "seal" of the sender to make sure it's legit.
if not check_seal(message):
raise BadMessage()
# Update the routing table.
uri = '{protocol}://{address}:{port}'.format(protocol=protocol,
address=address,
port=port)
other_node = PeerNode(message.sender, message.version, uri,
time.time())
log.info('Message received from {}'.format(other_node))
log.info(message)
self.routing_table.add_contact(other_node)
# Sort on message type and pass to handler method. Explicit > implicit.
try:
if isinstance(message, OK):
return self.handle_ok(message)
elif isinstance(message, Store):
return self.handle_store(message, other_node)
elif isinstance(message, FindNode):
return self.handle_find_node(message, other_node)
elif isinstance(message, FindValue):
return self.handle_find_value(message, other_node)
elif isinstance(message, Value):
return self.handle_value(message, other_node)
elif isinstance(message, Nodes):
return self.handle_nodes(message)
except Exception as ex:
log.error('Problem handling message from {}'.format(other_node))
log.error(message)
log.error(ex)
def send_message(self, contact, message, fire_and_forget=False):
"""
Sends a message to the specified contact, adds the resulting future to
the pending dictionary and ensures it times-out after the correct
period. A callback is added to ensure that the task is removed from
pending when it resolves (no matter the result). A timeout function
is scheduled after RESPONSE_TIMEOUT seconds to clean up the pending
task if the remote peer doesn't respond in a timely fashion.
"""
# A Future that represents the delivery of the message.
delivery = self.connector.send(contact, message, self)
# A Future that resolves with the response to the outgoing message.
response_received = asyncio.Future()
self.pending[message.uuid] = response_received
def on_delivery(task, node=self, response_received=response_received,
message=message):
"""
Called when the delivery of the message either succeeds or fails.
If the delivery failed then punish the remote peer and resolve
response_received appropriately. Otherwise make sure the
appropriate timeout or fire-and-forget handling is put in place
on the response_received Future.
"""
if task.exception():
node.routing_table.remove_contact(contact.network_id)
if not response_received.done():
response_received.set_exception(task.exception())
else:
if fire_and_forget:
node.event_loop.call_soon(response_received.set_result,
'sent')
else:
error = TimedOut('Response took too long.')
node.event_loop.call_later(RESPONSE_TIMEOUT,
node.trigger_task,
message, error)
delivery.add_done_callback(on_delivery)
def on_response(future, uuid=message.uuid):
"""
Ensure the resolved response_received is removed from the pending
dictionary.
"""
if uuid in self.pending:
del self.pending[uuid]
response_received.add_done_callback(on_response)
return message.uuid, response_received
def trigger_task(self, message, error=False):
"""
Given a message, will attempt to retrieve the related pending task
and trigger it with the message.
"""
if message.uuid in self.pending:
task = self.pending[message.uuid]
if not task.done():
if error:
task.set_exception(error)
else:
task.set_result(message)
# Remove the resolved task from the pending dictionary.
del self.pending[message.uuid]
def handle_ok(self, message):
"""
Handles an incoming ok message.
"""
self.trigger_task(message)
def handle_store(self, message, contact):
"""
Handles an incoming Store message. Checks the provenance and timeliness
of the message before storing locally. If there is a problem, removes
the untrustworthy peer from the routing table. Otherwise, at
REPLICATE_INTERVAL minutes in the future, the local node will attempt
to replicate the Store message elsewhere in the DHT if such time is
<= the message's expiry time.
Sends an OK message if successful.
"""
# Check provenance
if verify_item(to_dict(message)):
# Ensure the key is correct.
k = construct_key(message.public_key, message.name)
if k != message.key:
# This may indicate a different / unknown / unsupported
# version of the drogulus created the original message.
raise BadMessage('Key mismatch')
# Ensure the value isn't expired.
now = time.time()
if message.expires > 0 and (message.expires < now):
# There's a non-zero expiry and it's less than the current
# time, so return an error.
raise ExpiredMessage(
'Expired at {} (current time: {})'.format(message.expires,
now))
# Ensure the node doesn't already have a more up-to-date version
# of the value.
current = self.data_store.get(message.key, False)
if current and (message.timestamp < current.timestamp):
# The node already has a later version of the value so
# return an error.
raise OutOfDateMessage(
'Most recent timestamp: {}'.format(current.timestamp))
# Good to go, so store value.
self.data_store[message.key] = message
# At some future time attempt to replicate the Store message
# around the network IF it is within the message's expiry time.
self.event_loop.call_later(REPLICATE_INTERVAL, self.republish,
message.key)
# Reply with an OK so the other end updates its routing table.
return self.make_ok(message)
else:
# Remove from the routing table.
log.error('Problem with Store command from {}'.format(contact))
self.routing_table.blacklist(contact)
raise UnverifiableProvenance('Blacklisted')
def handle_find_node(self, message, contact):
"""
Handles an incoming FindNode message. Finds the details of up to K
other nodes closer to the target key that *this* node knows about.
Responds with a "Nodes" message containing the list of matching
nodes.
"""
target_key = message.key
# List containing tuples of information about the matching contacts.
other_nodes = [[n.public_key, n.version, n.uri] for n in
self.routing_table.find_close_nodes(target_key)]
return self.make_nodes(message, other_nodes)
def handle_find_value(self, message, contact):
"""
Handles an incoming FindValue message. If the local node contains the
value associated with the requested key replies with an appropriate
"Value" message. Otherwise, responds with details of up to K other
nodes closer to the target key that the local node knows about. In
this case a "Nodes" message containing the list of matching nodes is
sent to the remote peer.
"""
match = self.data_store.get(message.key, False)
if match:
# Update the last access time for the matching value.
self.data_store.touch(message.key)
return self.make_value(message, match.key, match.value,
match.timestamp, match.expires,
match.created_with, match.public_key,
match.name, match.signature)
else:
return self.handle_find_node(message, contact)
def handle_value(self, message, contact):
"""
Handles an incoming Value message containing a value retrieved from
another node on the DHT. Ensures the message is valid and resolves the
referenced future to signal the arrival of the value.
        If the value is invalid then the response is logged, the remote peer
is blacklisted and the referenced future is resolved with an
UnverifiableProvenance exception.
"""
if verify_item(to_dict(message)):
self.trigger_task(message)
else:
log.error(
'Problem with incoming Value message from {}'.format(contact))
log.error(message)
self.routing_table.remove_contact(contact.network_id, True)
log.error('Remote peer removed from routing table.')
self.trigger_task(message,
error=UnverifiableProvenance('Blacklisted'))
def handle_nodes(self, message):
"""
Handles an incoming Nodes message containing information about other
nodes on the network that are close to a requested key.
"""
self.trigger_task(message)
def make_ok(self, message):
"""
Returns an OK acknowledgement appropriate given the incoming message.
"""
ok = {
'uuid': message.uuid,
'recipient': message.sender,
'sender': self.public_key,
'reply_port': self.reply_port,
'version': self.version
}
seal = get_seal(ok, self.private_key)
ok['seal'] = seal
ok['message'] = 'ok'
return from_dict(ok)
def make_value(self, message, key, value, timestamp, expires,
created_with, public_key, name, signature):
"""
Returns a valid Value message in response to the referenced message.
"""
msg_dict = {
'uuid': message.uuid,
'recipient': message.sender,
'sender': self.public_key,
'reply_port': self.reply_port,
'version': self.version,
'key': key,
'value': value,
'timestamp': timestamp,
'expires': expires,
'created_with': created_with,
'public_key': public_key,
'name': name,
'signature': signature,
}
seal = get_seal(msg_dict, self.private_key)
msg_dict['seal'] = seal
msg_dict['message'] = 'value'
return from_dict(msg_dict)
def make_nodes(self, message, nodes):
"""
Returns a valid Nodes message in response to the referenced incoming
message.
"""
msg_dict = {
'uuid': message.uuid,
'recipient': message.sender,
'sender': self.public_key,
'reply_port': self.reply_port,
'version': self.version,
'nodes': nodes,
}
seal = get_seal(msg_dict, self.private_key)
msg_dict['seal'] = seal
msg_dict['message'] = 'nodes'
return from_dict(msg_dict)
def send_store(self, contact, key, value, timestamp, expires,
created_with, public_key, name, signature):
"""
Sends a Store message to the given contact. The value contained within
the message is stored against a key derived from the public_key and
name. Furthermore, the message is cryptographically signed using the
value, timestamp, expires, name and meta values.
"""
msg_dict = {
'uuid': str(uuid4()),
'recipient': contact.public_key,
'sender': self.public_key,
'reply_port': self.reply_port,
'version': self.version,
'key': key,
'value': value,
'timestamp': timestamp,
'expires': expires,
'created_with': created_with,
'public_key': public_key,
'name': name,
'signature': signature,
}
seal = get_seal(msg_dict, self.private_key)
msg_dict['seal'] = seal
msg_dict['message'] = 'store'
message = from_dict(msg_dict)
return self.send_message(contact, message)
def send_find(self, contact, target, message_type):
"""
Sends a Find[Node|Value] message to the given contact with the
intention of obtaining information at the given target key. The type of
find message is specified by message_type.
This method is called by an instance of the Lookup class.
"""
msg_dict = {
'uuid': str(uuid4()),
'recipient': contact.public_key,
'sender': self.public_key,
'reply_port': self.reply_port,
'version': self.version,
'key': target,
}
seal = get_seal(msg_dict, self.private_key)
msg_dict['seal'] = seal
if message_type is FindNode:
msg_dict['message'] = 'findnode'
else:
msg_dict['message'] = 'findvalue'
message = from_dict(msg_dict)
return self.send_message(contact, message)
def _store_to_nodes(self, nearest_nodes, duplicate, key, value, timestamp,
expires, created_with, public_key, name, signature):
"""
        Given a list of nearest nodes, returns a list of send_store based
        tasks that will store the item described by the remaining args in the
        DHT at those locations. The list will contain up to "duplicate"
        pending tasks.
"""
# Guards to ensure meaningful duplication.
if duplicate < 1:
raise ValueError('Duplication count may not be less than 1')
if len(nearest_nodes) < 1:
raise ValueError('Empty list of nearest nodes.')
list_of_tasks = []
for contact in nearest_nodes[:duplicate]:
uuid, task = self.send_store(contact, key, value, timestamp,
expires, created_with, public_key,
name, signature)
list_of_tasks.append(task)
return list_of_tasks
def replicate(self, duplicate, key, value, timestamp, expires,
created_with, public_key, name, signature):
"""
        Will replicate the item to "duplicate" number of nodes in the
        distributed hash table. Returns a Future that will fire with a list
        of send_store tasks once "duplicate" number of closest nodes have
        been identified. This list can be consumed by asyncio.wait or
        asyncio.gather to fire when the store commands have completed.
"""
if duplicate < 1:
# Guard to ensure meaningful duplication count. This may save
# time.
raise ValueError('Duplication count may not be less than 1')
result = asyncio.Future()
compound_key = construct_key(public_key, name)
lookup = Lookup(FindNode, compound_key, self, self.event_loop)
if lookup.done():
# If we get here it's because lookup couldn't start due to an
# empty routing table.
result.set_exception(lookup.exception())
return result
def on_result(r, duplicate=duplicate, result=result, key=key,
value=value, timestamp=timestamp, expires=expires,
created_with=created_with, public_key=public_key,
name=name, signature=signature):
"""
To be called when the lookup completes.
            If successful, send a store message to "duplicate" number of
            contacts from the list of close nodes found by the lookup and
            resolve the "result" Future with the resulting list of pending
            tasks.
If there was an error simply pass the exception on via the
Future representing the result.
"""
try:
contacts = r.result()
tasks = self._store_to_nodes(contacts, duplicate, key, value,
timestamp, expires, created_with,
public_key, name, signature)
result.set_result(tasks)
except Exception as ex:
result.set_exception(ex)
lookup.add_done_callback(on_result)
return result
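    # A minimal usage sketch for replicate() (the "node" instance and the item
    # fields are hypothetical stand-ins, not part of this module):
    #
    #     replication = node.replicate(3, key, value, timestamp, expires,
    #                                  created_with, public_key, name,
    #                                  signature)
    #
    #     def on_replication(fut):
    #         # fut resolves with a list of pending send_store tasks; they can
    #         # be awaited together, e.g. with asyncio.gather(*fut.result()).
    #         pass
    #
    #     replication.add_done_callback(on_replication)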
def retrieve(self, key):
"""
        Given a key, will try to retrieve the associated value from the
        distributed hash table. Returns a Future that will resolve when the
        operation either completes or fails.
As the original Kademlia explains:
"For caching purposes, once a lookup succeeds, the requesting node
stores the <key, value> pair at the closest node it observed to the
key that did not return the value."
        This method adds a callback to the lookup to achieve this end.
"""
lookup = Lookup(FindValue, key, self, self.event_loop)
if lookup.done():
# If we get here it's because lookup couldn't start due to an
# empty routing table.
return lookup
def cache_result(lookup):
"""
Called once the lookup resolves in order to store the item at the
node closest to the key that did not return the value. If the
lookup encountered an exception then no further action is taken.
"""
if lookup.exception():
return
caching_contact = None
for candidate in lookup.shortlist:
if candidate in lookup.contacted:
caching_contact = candidate
break
if caching_contact:
log.info("Caching to {}".format(caching_contact))
result = lookup.result()
self.send_store(caching_contact, lookup.target, result.value,
result.timestamp, result.expires,
result.created_with, result.public_key,
result.name, result.signature)
lookup.add_done_callback(cache_result)
return lookup
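    # A minimal usage sketch for retrieve() (the "node" instance and key are
    # hypothetical):
    #
    #     lookup = node.retrieve(target_key)
    #
    #     def on_value(fut):
    #         if fut.exception() is None:
    #             item = fut.result()  # the signed Value message
    #             print(item.value)
    #
    #     lookup.add_done_callback(on_value)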
def refresh(self):
"""
A periodically called method that will check and refresh the k-buckets
in the node's routing table.
"""
refresh_ids = self.routing_table.get_refresh_list()
log.info('Refreshing buckets with ids: {}'.format(refresh_ids))
for network_id in refresh_ids:
Lookup(FindNode, network_id, self, self.event_loop)
# schedule the next refresh.
self.event_loop.call_later(REFRESH_INTERVAL, self.refresh)
def republish(self, item_key):
"""
Periodically called to check and republish a locally stored item to
the wider network.
From the original Kademlia paper:
"To ensure the persistence of key-value pairs, nodes must periodically
republish keys. Otherwise, two phenomena may cause lookups for valid
keys to fail. First, some of the k nodes that initially get a key-value
pair when it is published may leave the network. Second, new nodes may
join the network with IDs closer to some published key than the nodes
on which the key-value pair was originally published. In both cases,
the nodes with a key-value pair must republish it so as once again to
ensure it is available on the k nodes closest to the key.
To compensate for nodes leaving the network, Kademlia republishes each
key-value pair once an hour. A naive implementation of this strategy
would require many messages - each of up to k nodes storing a key-value
pair would perform a node lookup followed by k - 1 STORE RPCs every
hour. Fortunately, the republish process can be heavily optimized.
First, when a node receives a STORE RPC for a given key-value pair, it
assumes the RPC was also issued to the other k - 1 closest nodes, and
thus the recipient will not republish the key-value pair in the next
hour. This ensures that as long as republication intervals are not
exactly synchronized, only one node will republish a given key-value
pair every hour."
In this implementation, messages are only republished if the following
requirements are met:
* The item still exists in the local data store (it may have been
deleted in the time between the time of the call and the time the call
was scheduled).
* The item has not expired.
* The item has not been updated for REPLICATE_INTERVAL seconds.
Furthermore, the item is deleted from the local data store (after
        republication) if the item has not been requested during the preceding
REPLICATE_INTERVAL seconds. This ensures items are not over-cached but
remain stored at peer nodes whose network ids are closest to the
item's key.
"""
log.info('Republish check for key: {}'.format(item_key))
if item_key in self.data_store:
item = self.data_store[item_key]
now = time.time()
if item.expires > 0.0 and item.expires < now:
# The item has expired. If the item's expiry is 0 (or less)
# then the item should never expire.
del self.data_store[item_key]
log.info('{} expired. Deleted from local data store.'
.format(item_key))
else:
updated = self.data_store.updated(item_key)
accessed = self.data_store.accessed(item_key)
update_delta = now - updated
access_delta = now - accessed
replicated = False
if update_delta > REPLICATE_INTERVAL:
# The item needs republishing because it hasn't been
# updated within the specified time interval.
log.info('Republishing item {}.'.format(item_key))
self.replicate(K, item.key, item.value, item.timestamp,
item.expires, item.created_with,
item.public_key, item.name, item.signature)
replicated = True
# Re-schedule the republication check.
handler = self.event_loop.call_later(REPLICATE_INTERVAL,
self.republish, item_key)
if access_delta > REPLICATE_INTERVAL:
# The item has not been accessed for a while so, if
# required, replicate the item and then remove it from the
# local data store. Remember to cancel the scheduled
# republication check.
log.info('Removing {} due to lack of activity.'
.format(item_key))
if not replicated:
self.replicate(K, item.key, item.value,
item.timestamp, item.expires,
item.created_with, item.public_key,
item.name, item.signature)
del self.data_store[item_key]
handler.cancel()
else:
log.info('{} is no longer in local data store. Cancelled.'
.format(item_key))
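    # Worked example of the republish checks above (REPLICATE_INTERVAL and the
    # deltas are illustrative): with REPLICATE_INTERVAL = 3600, an unexpired
    # item last updated 4000 seconds ago is replicated to K close peers and a
    # fresh check is scheduled; if it was also last accessed more than 3600
    # seconds ago it is then deleted locally and that freshly scheduled check
    # is cancelled.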
|
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import re
import warnings
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg, Sum, Count, Max, Min,
Aggregate, F, Value, Func,
IntegerField, FloatField, DecimalField)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
from django.db.models.sql import aggregates as sql_aggregates
from django.test import TestCase
from django.test.utils import Approximate
from django.test.utils import CaptureQueriesContext
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["aggregation.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=1)
rows = [
(1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=1)
rows = [
(1, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Check that doing exclude() on a foreign model after annotate()
doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Check that aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
or select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
class ComplexAggregateTestCase(TestCase):
fixtures = ["aggregation.json"]
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesRegexp(TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField())))[0]
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField()))[0]
self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with self.assertRaisesRegexp(FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(Value(2)))[0]
def test_annotation_expressions(self):
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertEqual(len(qs), 9)
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
with self.assertRaisesRegexp(FieldError, 'Expression contains mixed types. You must set output_field'):
Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=4)
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=4)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=4)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField(max_digits=6, decimal_places=2))).get(pk=4)
self.assertEqual(b3.sums, Decimal("383.69"))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesRegexp(TypeError, 'Complex expressions require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with self.assertRaisesRegexp(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(pk=1)
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(pk=1)
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(pk=1).order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{
"name": 'Adrian Holovaty',
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"combined_age": 63
}
]
)
def test_annotate_values_aggregate(self):
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(pk=1)
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(pk=1)
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesRegexp(FieldError, "Cannot compute Sum\('id__max'\): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
def test_add_implementation(self):
try:
# test completely changing how the output is rendered
def lower_case_function_override(self, qn, connection):
sql, params = qn.compile(self.source_expressions[0])
substitutions = dict(function=self.function.lower(), expressions=sql)
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(Sum, 'as_' + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField()))
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=4)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, qn, connection):
self.extra['function'] = self.function.lower()
return super(Sum, self).as_sql(qn, connection)
setattr(Sum, 'as_' + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField()))
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=4)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, qn, connection):
substitutions = dict(function='MAX', expressions='2')
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(Sum, 'as_' + connection.vendor, be_evil)
qs = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField()))
self.assertEqual(str(qs.query).count('MAX('), 1)
b1 = qs.get(pk=4)
self.assertEqual(b1.sums, 2)
finally:
delattr(Sum, 'as_' + connection.vendor)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
# Create a plain expression
class Greatest(Func):
function = 'GREATEST'
def as_sqlite(self, qn, connection):
return super(Greatest, self).as_sql(qn, connection, function='MAX')
qs = Publisher.objects.annotate(
price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
).filter(price_or_median__gte=F('num_awards')).order_by('pk')
self.assertQuerysetEqual(
qs, [1, 2, 3, 4], lambda v: v.pk)
qs2 = Publisher.objects.annotate(
rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
output_field=FloatField())
).filter(rating_or_num_awards__gt=F('num_awards')).order_by('pk')
self.assertQuerysetEqual(
qs2, [1, 2], lambda v: v.pk)
def test_backwards_compatibility(self):
class SqlNewSum(sql_aggregates.Aggregate):
sql_function = 'SUM'
class NewSum(Aggregate):
name = 'Sum'
def add_to_query(self, query, alias, col, source, is_summary):
klass = SqlNewSum
aggregate = klass(
col, source=source, is_summary=is_summary, **self.extra)
query.annotations[alias] = aggregate
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInDjango20Warning)
qs = Author.objects.values('name').annotate(another_age=NewSum('age') + F('age'))
a = qs.get(pk=1)
self.assertEqual(a['another_age'], 68)
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for file sinks."""
import glob
import logging
import os
import shutil
import tempfile
import unittest
import hamcrest as hc
import mock
import apache_beam as beam
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
from apache_beam.options.value_provider import StaticValueProvider
# TODO: Refactor code so all io tests are using same library
# TestCaseWithTempDirCleanup class.
class _TestCaseWithTempDirCleanUp(unittest.TestCase):
"""Base class for TestCases that deals with TempDir clean-up.
Inherited test cases will call self._new_tempdir() to start a temporary dir
which will be deleted at the end of the tests (when tearDown() is called).
"""
def setUp(self):
self._tempdirs = []
def tearDown(self):
for path in self._tempdirs:
if os.path.exists(path):
shutil.rmtree(path)
self._tempdirs = []
def _new_tempdir(self):
result = tempfile.mkdtemp()
self._tempdirs.append(result)
return result
def _create_temp_file(self, name='', suffix=''):
if not name:
name = tempfile.template
file_name = tempfile.NamedTemporaryFile(
delete=False, prefix=name,
dir=self._new_tempdir(), suffix=suffix).name
return file_name
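# A minimal sketch of how a test case can use the helpers above (the class
# and file names are illustrative only):
#
#     class MySinkTest(_TestCaseWithTempDirCleanUp):
#         def test_writes_file(self):
#             path = self._create_temp_file(name='example', suffix='.txt')
#             with open(path, 'w') as f:
#                 f.write('hello')
#             # the containing temp dir is removed automatically in tearDown()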
class MyFileBasedSink(filebasedsink.FileBasedSink):
def open(self, temp_path):
# TODO: Fix main session pickling.
# file_handle = super(MyFileBasedSink, self).open(temp_path)
file_handle = filebasedsink.FileBasedSink.open(self, temp_path)
file_handle.write('[start]')
return file_handle
def write_encoded_record(self, file_handle, encoded_value):
file_handle.write('[')
file_handle.write(encoded_value)
file_handle.write(']')
def close(self, file_handle):
file_handle.write('[end]')
# TODO: Fix main session pickling.
# file_handle = super(MyFileBasedSink, self).close(file_handle)
file_handle = filebasedsink.FileBasedSink.close(self, file_handle)
class TestFileBasedSink(_TestCaseWithTempDirCleanUp):
def test_file_sink_writing(self):
temp_path = os.path.join(self._new_tempdir(), 'FileBasedSink')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
# Manually invoke the generic Sink API.
init_token = sink.initialize_write()
writer1 = sink.open_writer(init_token, '1')
writer1.write('a')
writer1.write('b')
res1 = writer1.close()
writer2 = sink.open_writer(init_token, '2')
writer2.write('x')
writer2.write('y')
writer2.write('z')
res2 = writer2.close()
_ = list(sink.finalize_write(init_token, [res1, res2]))
# Retry the finalize operation (as if the first attempt was lost).
res = list(sink.finalize_write(init_token, [res1, res2]))
# Check the results.
shard1 = temp_path + '-00000-of-00002.output'
shard2 = temp_path + '-00001-of-00002.output'
self.assertEqual(res, [shard1, shard2])
self.assertEqual(open(shard1).read(), '[start][a][b][end]')
self.assertEqual(open(shard2).read(), '[start][x][y][z][end]')
# Check that any temp files are deleted.
self.assertItemsEqual([shard1, shard2], glob.glob(temp_path + '*'))
def test_file_sink_display_data(self):
temp_path = os.path.join(self._new_tempdir(), 'display')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher(
'compression', 'auto'),
DisplayDataItemMatcher(
'file_pattern',
'{}{}'.format(
temp_path,
'-%(shard_num)05d-of-%(num_shards)05d.output'))]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_empty_write(self):
temp_path = tempfile.NamedTemporaryFile().name
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder()
)
p = TestPipeline()
p | beam.Create([]) | beam.io.Write(sink) # pylint: disable=expression-not-assigned
p.run()
self.assertEqual(
open(temp_path + '-00000-of-00001.output').read(), '[start][end]')
def test_static_value_provider_empty_write(self):
temp_path = StaticValueProvider(value_type=str,
value=tempfile.NamedTemporaryFile().name)
sink = MyFileBasedSink(
temp_path,
file_name_suffix=StaticValueProvider(value_type=str, value='.output'),
coder=coders.ToStringCoder()
)
p = TestPipeline()
p | beam.Create([]) | beam.io.Write(sink) # pylint: disable=expression-not-assigned
p.run()
self.assertEqual(
open(temp_path.get() + '-00000-of-00001.output').read(), '[start][end]')
def test_fixed_shard_write(self):
temp_path = os.path.join(self._new_tempdir(), 'empty')
sink = MyFileBasedSink(
temp_path,
file_name_suffix='.output',
num_shards=3,
shard_name_template='_NN_SSS_',
coder=coders.ToStringCoder())
p = TestPipeline()
p | beam.Create(['a', 'b']) | beam.io.Write(sink) # pylint: disable=expression-not-assigned
p.run()
concat = ''.join(
open(temp_path + '_03_%03d_.output' % shard_num).read()
for shard_num in range(3))
self.assertTrue('][a][' in concat, concat)
self.assertTrue('][b][' in concat, concat)
# Not using 'test' in name so that 'nose' doesn't pick this as a test.
def run_temp_dir_check(self, no_dir_path, dir_path, no_dir_root_path,
dir_root_path, prefix, separator):
def _get_temp_dir(file_path_prefix):
sink = MyFileBasedSink(
file_path_prefix, file_name_suffix='.output',
coder=coders.ToStringCoder())
return sink.initialize_write()
temp_dir = _get_temp_dir(no_dir_path)
self.assertTrue(temp_dir.startswith(prefix))
last_sep = temp_dir.rfind(separator)
self.assertTrue(temp_dir[last_sep + 1:].startswith('beam-temp'))
temp_dir = _get_temp_dir(dir_path)
self.assertTrue(temp_dir.startswith(prefix))
last_sep = temp_dir.rfind(separator)
self.assertTrue(temp_dir[last_sep + 1:].startswith('beam-temp'))
with self.assertRaises(ValueError):
_get_temp_dir(no_dir_root_path)
with self.assertRaises(ValueError):
_get_temp_dir(dir_root_path)
def test_temp_dir_gcs(self):
try:
self.run_temp_dir_check(
'gs://aaa/bbb', 'gs://aaa/bbb/', 'gs://aaa', 'gs://aaa/', 'gs://',
'/')
except ValueError:
logging.debug('Ignoring test since GCP module is not installed')
@mock.patch('apache_beam.io.localfilesystem.os')
def test_temp_dir_local(self, filesystem_os_mock):
    # Here we test a unix-like mock file-system
    # (not really testing Unix or Windows since we mock the functions of the
    # 'os' module).
def _fake_unix_split(path):
sep = path.rfind('/')
if sep < 0:
raise ValueError('Path must contain a separator')
return (path[:sep], path[sep + 1:])
def _fake_unix_join(base, path):
return base + '/' + path
filesystem_os_mock.path.abspath = lambda a: a
filesystem_os_mock.path.split.side_effect = _fake_unix_split
filesystem_os_mock.path.join.side_effect = _fake_unix_join
self.run_temp_dir_check(
'/aaa/bbb', '/aaa/bbb/', '/', '/', '/', '/')
def test_file_sink_multi_shards(self):
temp_path = os.path.join(self._new_tempdir(), 'multishard')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
# Manually invoke the generic Sink API.
init_token = sink.initialize_write()
num_shards = 1000
writer_results = []
for i in range(num_shards):
uuid = 'uuid-%05d' % i
writer = sink.open_writer(init_token, uuid)
writer.write('a')
writer.write('b')
writer.write(uuid)
writer_results.append(writer.close())
res_first = list(sink.finalize_write(init_token, writer_results))
# Retry the finalize operation (as if the first attempt was lost).
res_second = list(sink.finalize_write(init_token, writer_results))
self.assertItemsEqual(res_first, res_second)
res = sorted(res_second)
for i in range(num_shards):
shard_name = '%s-%05d-of-%05d.output' % (temp_path, i, num_shards)
uuid = 'uuid-%05d' % i
self.assertEqual(res[i], shard_name)
self.assertEqual(
open(shard_name).read(), ('[start][a][b][%s][end]' % uuid))
# Check that any temp files are deleted.
self.assertItemsEqual(res, glob.glob(temp_path + '*'))
def test_file_sink_io_error(self):
temp_path = os.path.join(self._new_tempdir(), 'ioerror')
sink = MyFileBasedSink(
temp_path, file_name_suffix='.output', coder=coders.ToStringCoder())
# Manually invoke the generic Sink API.
init_token = sink.initialize_write()
writer1 = sink.open_writer(init_token, '1')
writer1.write('a')
writer1.write('b')
res1 = writer1.close()
writer2 = sink.open_writer(init_token, '2')
writer2.write('x')
writer2.write('y')
writer2.write('z')
res2 = writer2.close()
os.remove(res2)
with self.assertRaises(Exception):
list(sink.finalize_write(init_token, [res1, res2]))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
#! /usr/bin/env python
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import io
import os
import shlex
import sys
import stat
import subprocess
import time
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None, update_tryorder=1):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
if update_tryorder > 0:
_tryorder.append(name)
elif update_tryorder < 0:
_tryorder.insert(0, name)
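# For illustration (the browser name is hypothetical): registering a generic
# controller, e.g. the BackgroundBrowser defined further below, makes it
# available to get() and open():
#
#     register("my-browser", None, BackgroundBrowser("my-browser"))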
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=1):
for name in _tryorder:
browser = get(name)
if browser.open(url, new, autoraise):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
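# Typical module-level usage, per the note above about preferring
# "import webbrowser" over "from webbrowser import *" (URL is illustrative):
#
#     import webbrowser
#     webbrowser.open("http://www.python.org")           # reuse a window
#     webbrowser.open_new("http://www.python.org")       # new window
#     webbrowser.open_new_tab("http://www.python.org")   # new tab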
def _synthesize(browser, update_tryorder=1):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
cmd = browser.split()[0]
if not _iscommand(cmd):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
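# For illustration (paths are hypothetical): if the BROWSER environment
# variable points at "/opt/firefox/firefox" and a "firefox" controller is
# already registered, _synthesize copies that controller, points it at the
# given path and registers it under the new name; otherwise it returns
# [None, None] and get() moves on to its next candidate.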
if sys.platform[:3] == "win":
def _isexecutable(cmd):
cmd = cmd.lower()
if os.path.isfile(cmd) and cmd.endswith((".exe", ".bat")):
return True
for ext in ".exe", ".bat":
if os.path.isfile(cmd + ext):
return True
return False
else:
def _isexecutable(cmd):
if os.path.isfile(cmd):
mode = os.stat(cmd)[stat.ST_MODE]
if mode & stat.S_IXUSR or mode & stat.S_IXGRP or mode & stat.S_IXOTH:
return True
return False
def _iscommand(cmd):
"""Return True if cmd is executable or can be found on the executable
search path."""
if _isexecutable(cmd):
return True
path = os.environ.get("PATH")
if not path:
return False
for d in path.split(os.pathsep):
exe = os.path.join(d, cmd)
if _isexecutable(exe):
return True
return False
# General parent classes
class BaseBrowser(object):
"""Parent class for all browsers. Do not use directly."""
args = ['%s']
def __init__(self, name=""):
self.name = name
self.basename = name
def open(self, url, new=0, autoraise=1):
raise NotImplementedError
def open_new(self, url):
return self.open(url, 1)
def open_new_tab(self, url):
return self.open(url, 2)
class GenericBrowser(BaseBrowser):
"""Class for all browsers started with a command
and without remote functionality."""
def __init__(self, name):
if isinstance(name, str):
self.name = name
self.args = ["%s"]
else:
# name should be a list with arguments
self.name = name[0]
self.args = name[1:]
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=1):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True)
return not p.wait()
except OSError:
return False
class BackgroundBrowser(GenericBrowser):
"""Class for all browsers which are to be started in the
background."""
def open(self, url, new=0, autoraise=1):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, preexec_fn=setsid)
return (p.poll() is None)
except OSError:
return False
class UnixBrowser(BaseBrowser):
"""Parent class for all Unix browsers with remote functionality."""
raise_opts = None
remote_args = ['%action', '%s']
remote_action = None
remote_action_newwin = None
remote_action_newtab = None
background = False
redirect_stdout = True
def _invoke(self, args, remote, autoraise):
raise_opt = []
if remote and self.raise_opts:
# use autoraise argument only for remote invocation
autoraise = int(bool(autoraise))
opt = self.raise_opts[autoraise]
if opt: raise_opt = [opt]
cmdline = [self.name] + raise_opt + args
if remote or self.background:
inout = io.open(os.devnull, "r+")
else:
# for TTY browsers, we need stdin/out
inout = None
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
stdout=(self.redirect_stdout and inout or None),
stderr=inout, preexec_fn=setsid)
if remote:
            # wait five seconds. If the subprocess is not finished, the
# remote invocation has (hopefully) started a new instance.
time.sleep(1)
rc = p.poll()
if rc is None:
time.sleep(4)
rc = p.poll()
if rc is None:
return True
# if remote call failed, open() will try direct invocation
return not rc
elif self.background:
if p.poll() is None:
return True
else:
return False
else:
return not p.wait()
def open(self, url, new=0, autoraise=1):
if new == 0:
action = self.remote_action
elif new == 1:
action = self.remote_action_newwin
elif new == 2:
if self.remote_action_newtab is None:
action = self.remote_action_newwin
else:
action = self.remote_action_newtab
else:
raise Error("Bad 'new' parameter to open(); " +
"expected 0, 1, or 2, got %s" % new)
args = [arg.replace("%s", url).replace("%action", action)
for arg in self.remote_args]
success = self._invoke(args, True, autoraise)
if not success:
# remote invocation failed, try straight way
args = [arg.replace("%s", url) for arg in self.args]
return self._invoke(args, False, False)
else:
return True
class Mozilla(UnixBrowser):
"""Launcher class for Mozilla/Netscape browsers."""
raise_opts = ["-noraise", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = True
Netscape = Mozilla
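# For illustration: with the Mozilla launcher above, open(url, new=2,
# autoraise=1) builds roughly
#     mozilla -raise -remote 'openURL(<url>,new-tab)'
# and, if that remote invocation fails, falls back to a plain "mozilla <url>".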
class Galeon(UnixBrowser):
"""Launcher class for Galeon/Epiphany browsers."""
raise_opts = ["-noraise", ""]
remote_args = ['%action', '%s']
remote_action = "-n"
remote_action_newwin = "-w"
background = True
class Opera(UnixBrowser):
"Launcher class for Opera browser."
raise_opts = ["", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-page"
background = True
class Elinks(UnixBrowser):
"Launcher class for Elinks browsers."
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = False
# elinks doesn't like its stdout to be redirected -
# it uses redirected stdout as a signal to do -dump
redirect_stdout = False
class Konqueror(BaseBrowser):
"""Controller for the KDE File Manager (kfm, or Konqueror).
See the output of ``kfmclient --commands``
for more information on the Konqueror remote-control interface.
"""
def open(self, url, new=0, autoraise=1):
        # XXX Currently I know no way to prevent KFM from opening a new window.
if new == 2:
action = "newTab"
else:
action = "openURL"
devnull = io.open(os.devnull, "r+")
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
try:
p = subprocess.Popen(["kfmclient", action, url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull)
except OSError:
# fall through to next variant
pass
else:
p.wait()
# kfmclient's return code unfortunately seems to have no meaning
return True
try:
p = subprocess.Popen(["konqueror", "--silent", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
# fall through to next variant
pass
else:
if p.poll() is None:
# Should be running now.
return True
try:
p = subprocess.Popen(["kfm", "-d", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
return False
else:
return (p.poll() is None)
class Grail(BaseBrowser):
# There should be a way to maintain a connection to Grail, but the
# Grail remote control protocol doesn't really allow that at this
# point. It probably never will!
def _find_grail_rc(self):
import glob
import pwd
import socket
import tempfile
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
filename = os.path.join(tempdir, user + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
for fn in maybes:
# need to PING each one until we find one that's live
try:
s.connect(fn)
except socket.error:
# no good; attempt to clean it out, but don't fail:
try:
os.unlink(fn)
except IOError:
pass
else:
return s
def _remote(self, action):
s = self._find_grail_rc()
if not s:
return 0
s.send(action)
s.close()
return 1
def open(self, url, new=0, autoraise=1):
if new:
ok = self._remote("LOADNEW " + url)
else:
ok = self._remote("LOAD " + url)
return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
# The default GNOME browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"):
register("gnome-open", None, BackgroundBrowser("gnome-open"))
# The default KDE browser
if "KDE_FULL_SESSION" in os.environ and _iscommand("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
# The Mozilla/Netscape browsers
for browser in ("mozilla-firefox", "firefox",
"mozilla-firebird", "firebird",
"seamonkey", "mozilla", "netscape"):
if _iscommand(browser):
register(browser, None, Mozilla(browser))
# Konqueror/kfm, the KDE browser.
if _iscommand("kfm"):
register("kfm", Konqueror, Konqueror("kfm"))
elif _iscommand("konqueror"):
register("konqueror", Konqueror, Konqueror("konqueror"))
# Gnome's Galeon and Epiphany
for browser in ("galeon", "epiphany"):
if _iscommand(browser):
register(browser, None, Galeon(browser))
# Skipstone, another Gtk/Mozilla based browser
if _iscommand("skipstone"):
register("skipstone", None, BackgroundBrowser("skipstone"))
# Opera, quite popular
if _iscommand("opera"):
register("opera", None, Opera("opera"))
# Next, Mosaic -- old but still in use.
if _iscommand("mosaic"):
register("mosaic", None, BackgroundBrowser("mosaic"))
# Grail, the Python browser. Does anybody still use it?
if _iscommand("grail"):
register("grail", Grail, None)
# Prefer X browsers if present
if os.environ.get("DISPLAY"):
register_X_browsers()
# Also try console browsers
if os.environ.get("TERM"):
# The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if _iscommand("links"):
register("links", None, GenericBrowser("links"))
if _iscommand("elinks"):
register("elinks", None, Elinks("elinks"))
# The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
if _iscommand("lynx"):
register("lynx", None, GenericBrowser("lynx"))
# The w3m browser <http://w3m.sourceforge.net/>
if _iscommand("w3m"):
register("w3m", None, GenericBrowser("w3m"))
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
class WindowsDefault(BaseBrowser):
def open(self, url, new=0, autoraise=1):
try:
os.startfile(url)
except WindowsError:
# [Error 22] No application is associated with the specified
# file for this operation: '<URL>'
return False
else:
return True
_tryorder = []
_browsers = {}
# First try to use the default Windows browser
register("windows-default", WindowsDefault)
# Detect some common Windows browsers, fallback to IE
iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
"Internet Explorer\\IEXPLORE.EXE")
for browser in ("firefox", "firebird", "seamonkey", "mozilla",
"netscape", "opera", iexplore):
if _iscommand(browser):
register(browser, None, BackgroundBrowser(browser))
#
# Platform support for MacOS
#
try:
import ic
except ImportError:
pass
else:
class InternetConfig(BaseBrowser):
def open(self, url, new=0, autoraise=1):
ic.launchurl(url)
return True # Any way to get status?
register("internet-config", InternetConfig, update_tryorder=-1)
if sys.platform == 'darwin':
# Adapted from patch submitted to SourceForge by Steven J. Burr
class MacOSX(BaseBrowser):
"""Launcher class for Aqua browsers on Mac OS X
Optionally specify a browser name on instantiation. Note that this
will not work for Aqua browsers if the user has moved the application
package after installation.
If no browser is specified, the default browser, as specified in the
Internet System Preferences panel, will be used.
"""
def __init__(self, name):
self.name = name
def open(self, url, new=0, autoraise=1):
assert "'" not in url
# hack for local urls
if ':' not in url:
url = 'file:'+url
# new must be 0 or 1
new = int(bool(new))
if self.name == "default":
# User called open, open_new or get without a browser parameter
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
# User called get and chose a browser
if self.name == "OmniWeb":
toWindow = ""
else:
# Include toWindow parameter of OpenURL command for browsers
# that support it. 0 == new window; -1 == existing
toWindow = "toWindow %d" % (new - 1)
cmd = 'OpenURL "%s"' % url.replace('"', '%22')
script = '''tell application "%s"
activate
%s %s
end tell''' % (self.name, cmd, toWindow)
# Open pipe to AppleScript through osascript command
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
# Write script to osascript's stdin
osapipe.write(script)
rc = osapipe.close()
return not rc
# Don't clear _tryorder or _browsers since OS X can use above Unix support
# (but we prefer using the OS X specific stuff)
register("MacOSX", None, MacOSX('default'), -1)
#
# Platform support for OS/2
#
if sys.platform[:3] == "os2" and _iscommand("netscape"):
_tryorder = []
_browsers = {}
register("os2netscape", None,
GenericBrowser(["start", "netscape", "%s"]), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
if "BROWSER" in os.environ:
_userchoices = os.environ["BROWSER"].split(os.pathsep)
_userchoices.reverse()
# Treat choices in same way as if passed into get() but do register
# and prepend to _tryorder
for cmdline in _userchoices:
if cmdline != '':
_synthesize(cmdline, -1)
cmdline = None # to make del work if _userchoices was empty
del cmdline
del _userchoices
# what to do if _tryorder is now empty?
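# A minimal sketch of the override (illustrative assumption, not taken from this
# module's documentation): entries in BROWSER are split on os.pathsep and each is
# prepended to _tryorder via _synthesize, so the leftmost entry is tried first, e.g.
#
#   BROWSER="lynx:firefox" python your_script.py
#
# would prefer the console lynx browser and fall back to firefox and the rest of
# the detected browsers.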
def main():
import getopt
usage = """Usage: %s [-n | -t] url
-n: open new window
-t: open new tab""" % sys.argv[0]
try:
opts, args = getopt.getopt(sys.argv[1:], 'ntd')
except getopt.error as msg:
print(msg, file=sys.stderr)
print(usage, file=sys.stderr)
sys.exit(1)
new_win = 0
for o, a in opts:
if o == '-n': new_win = 1
elif o == '-t': new_win = 2
if len(args) != 1:
print(usage, file=sys.stderr)
sys.exit(1)
url = args[0]
open(url, new_win)
print("\a")
if __name__ == "__main__":
main()
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TestExecutor executes tests."""
import contextlib
import enum
import logging
import pstats
import sys
import tempfile
import threading
import traceback
from typing import Iterator, List, Optional, Text, Type, TYPE_CHECKING
from openhtf import util
from openhtf.core import base_plugs
from openhtf.core import diagnoses_lib
from openhtf.core import phase_branches
from openhtf.core import phase_collections
from openhtf.core import phase_descriptor
from openhtf.core import phase_executor
from openhtf.core import phase_group
from openhtf.core import phase_nodes
from openhtf.core import test_record
from openhtf.core import test_state
from openhtf.util import conf
from openhtf.util import threads
if TYPE_CHECKING:
from openhtf.core import test_descriptor # pylint: disable=g-import-not-at-top
_LOG = logging.getLogger(__name__)
conf.declare(
'cancel_timeout_s',
default_value=2,
description='Timeout (in seconds) when the test has been cancelled '
'to wait for the running phase to exit.')
conf.declare(
'stop_on_first_failure',
default_value=False,
description='Stop current test execution and return Outcome FAIL '
'on first phase with failed measurement.')
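# A minimal sketch (assumption about the conf helper's loading API; adjust to
# however configuration is loaded in your deployment): the two flags declared
# above are plain conf keys, so they can be set programmatically before a run,
# e.g.
#
#   conf.load(cancel_timeout_s=5, stop_on_first_failure=True)
#
# or supplied through whatever YAML/flag mechanism feeds openhtf.util.conf.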
class TestExecutionError(Exception):
"""Raised when there's an internal error during test execution."""
class TestStopError(Exception):
"""Test is being stopped."""
class _ExecutorReturn(enum.Enum):
CONTINUE = 0
TERMINAL = 1
def _more_critical(e1: _ExecutorReturn, e2: _ExecutorReturn) -> _ExecutorReturn:
return _ExecutorReturn(max(e1.value, e2.value))
def combine_profile_stats(profile_stats_iter: List[pstats.Stats],
output_filename: Text) -> None:
"""Given an iterable of pstats.Stats, combine them into a single Stats."""
profile_stats_filenames = []
for profile_stats in profile_stats_iter:
with tempfile.NamedTemporaryFile(delete=False) as f:
profile_stats_filename = f.name
profile_stats.dump_stats(profile_stats_filename)
profile_stats_filenames.append(profile_stats_filename)
if profile_stats_filenames:
pstats.Stats(*profile_stats_filenames).dump_stats(output_filename)
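# A minimal usage sketch (illustrative assumption, not part of the original
# module): after a profiled run, the per-phase Stats gathered by TestExecutor
# can be merged for offline inspection with the standard pstats tooling.
#
#   executor = TestExecutor(descriptor, uid, start_phase, options,
#                           run_with_profiling=True)
#   executor.start()
#   executor.wait()
#   combine_profile_stats(executor.phase_profile_stats, 'combined.pstats')
#   # then: python -m pstats combined.pstats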
# pylint: disable=too-many-instance-attributes
class TestExecutor(threads.KillableThread):
"""Encompasses the execution of a single test."""
daemon = True
def __init__(self, test_descriptor: 'test_descriptor.TestDescriptor',
execution_uid: Text,
test_start: Optional[phase_descriptor.PhaseDescriptor],
test_options: 'test_descriptor.TestOptions',
run_with_profiling: bool):
super(TestExecutor, self).__init__(
name='TestExecutorThread', run_with_profiling=run_with_profiling)
self.test_state = None # type: Optional[test_state.TestState]
self._test_descriptor = test_descriptor
self._test_start = test_start
self._test_options = test_options
self._lock = threading.Lock()
self._phase_exec = None # type: Optional[phase_executor.PhaseExecutor]
self.uid = execution_uid
self._last_outcome = None # type: Optional[phase_executor.PhaseExecutionOutcome]
self._abort = threading.Event()
self._full_abort = threading.Event()
# This is a reentrant lock so that the teardown logic that prevents aborts
# affects nested sequences.
self._teardown_phases_lock = threading.RLock()
# Populated if profiling is enabled.
self._phase_profile_stats = [] # type: List[pstats.Stats]
@property
def logger(self) -> logging.Logger:
return self.test_state.state_logger
@property
def phase_profile_stats(self) -> List[pstats.Stats]:
"""Returns iterable of profiling Stats objects, per phase."""
return self._phase_profile_stats
def close(self) -> None:
"""Close and remove any global registrations.
Always call this function when finished with this instance.
This function is defined instead of a __del__ function because Python calls
the __del__ function unreliably.
"""
self.wait()
self.test_state.close()
def abort(self) -> None:
"""Abort this test."""
if self._abort.is_set():
_LOG.error('Abort already set; forcibly stopping the process.')
self._full_abort.set()
self._stop_phase_executor(force=True)
return
_LOG.error('Abort test executor.')
# Deterministically mark the test as aborted.
self._abort.set()
self._stop_phase_executor()
# No need to kill this thread because the abort state has been set, it will
# end as soon as all queued teardown phases are run.
def finalize(self) -> test_state.TestState:
"""Finalize test execution and output resulting record to callbacks.
Should only be called once at the conclusion of a test run, and will raise
an exception if end_time_millis is already set.
Returns:
Finalized TestState. It must not be modified after this call.
Raises:
TestStopError: if the test was stopped before a test state was created.
TestAlreadyFinalized: if end_time_millis is already set.
"""
if not self.test_state:
raise TestStopError('Test Stopped.')
if self.test_state.test_record.dut_id is None:
_LOG.warning('DUT ID is still not set; using default.')
self.test_state.test_record.dut_id = self._test_options.default_dut_id
return self.test_state
def wait(self) -> None:
"""Waits until death."""
# Must use a timeout here in case this is called from the main thread.
# Otherwise, the SIGINT abort logic in test_descriptor will not get called.
timeout = 31557600 # Seconds in a year.
if sys.version_info >= (3, 2):
# TIMEOUT_MAX can be too large and cause overflows on 32-bit OSes, so take
# whichever timeout is shorter.
timeout = min(threading.TIMEOUT_MAX, timeout) # pytype: disable=module-attr
self.join(timeout)
def _thread_proc(self) -> None:
"""Handles one whole test from start to finish."""
try:
# Top level steps required to run a single iteration of the Test.
self.test_state = test_state.TestState(self._test_descriptor, self.uid,
self._test_options)
phase_exec = phase_executor.PhaseExecutor(self.test_state)
# Any access to self._phase_exec must be done while holding this lock.
with self._lock:
self._phase_exec = phase_exec
if self._test_start is not None and self._execute_test_start():
# Exit early if test_start returned a terminal outcome of any kind.
return
self.test_state.mark_test_started()
# Full plug initialization happens _after_ the start trigger, as close to
# test execution as possible, for the best chance of test equipment being
# in a known-good state at the start of test execution.
if self._initialize_plugs():
return
# Everything is set, set status and begin test execution.
self.test_state.set_status_running()
self._execute_node(self._test_descriptor.phase_sequence, None, False)
self._execute_test_diagnosers()
except: # pylint: disable=bare-except
stacktrace = traceback.format_exc()
_LOG.error('Error in TestExecutor: \n%s', stacktrace)
raise
finally:
self._execute_test_teardown()
def _initialize_plugs(
self,
plug_types: Optional[List[Type[base_plugs.BasePlug]]] = None) -> bool:
"""Initialize plugs.
Args:
plug_types: optional list of plug classes to initialize.
Returns:
True if there was an error initializing the plugs.
"""
try:
self.test_state.plug_manager.initialize_plugs(plug_types=plug_types)
return False
except Exception: # pylint: disable=broad-except
# Record the equivalent failure outcome and exit early.
self._last_outcome = phase_executor.PhaseExecutionOutcome(
phase_executor.ExceptionInfo(*sys.exc_info()))
return True
def _execute_test_start(self) -> bool:
"""Run the start trigger phase, and check that the DUT ID is set after.
Initializes any plugs used in the trigger.
Logs a warning if the start trigger failed to set the DUT ID.
The test start is special because we wait to initialize all other plugs
until this phase runs.
Returns:
True if there was a terminal error either setting up or running the test
start phase.
"""
# Have the phase executor run the start trigger phase. Do partial plug
# initialization for just the plugs needed by the start trigger phase.
if self._initialize_plugs(
plug_types=[phase_plug.cls for phase_plug in self._test_start.plugs]):
return True
outcome, profile_stats = self._phase_exec.execute_phase(
self._test_start, self._run_with_profiling)
if profile_stats is not None:
self._phase_profile_stats.append(profile_stats)
if outcome.is_terminal:
self._last_outcome = outcome
return True
if self.test_state.test_record.dut_id is None:
_LOG.warning('Start trigger did not set a DUT ID.')
return False
def _stop_phase_executor(self, force: bool = False) -> None:
with self._lock:
phase_exec = self._phase_exec
if not phase_exec:
# The test executor has not started yet, so no stopping is required.
return
if not force and not self._teardown_phases_lock.acquire(False):
# If locked, teardown phases are running, so do not cancel those.
return
try:
phase_exec.stop(timeout_s=conf.cancel_timeout_s)
# Resetting so phase_exec can run teardown phases.
phase_exec.reset_stop()
finally:
if not force:
self._teardown_phases_lock.release()
def _execute_test_teardown(self) -> None:
# Plug teardown does not affect the test outcome.
self.test_state.plug_manager.tear_down_plugs()
# Now finalize the test state.
if self._abort.is_set():
self.logger.debug('Finishing test with outcome ABORTED.')
self.test_state.abort()
elif self._last_outcome and self._last_outcome.is_terminal:
self.test_state.finalize_from_phase_outcome(self._last_outcome)
else:
self.test_state.finalize_normally()
def _execute_phase(self, phase: phase_descriptor.PhaseDescriptor,
subtest_rec: Optional[test_record.SubtestRecord],
in_teardown: bool) -> _ExecutorReturn:
if subtest_rec:
self.logger.debug('Executing phase %s under subtest %s', phase.name,
subtest_rec.name)
else:
self.logger.debug('Executing phase %s', phase.name)
if not in_teardown and subtest_rec and subtest_rec.is_fail:
self._phase_exec.skip_phase(phase, subtest_rec)
return _ExecutorReturn.CONTINUE
outcome, profile_stats = self._phase_exec.execute_phase(
phase,
run_with_profiling=self._run_with_profiling,
subtest_rec=subtest_rec)
if profile_stats is not None:
self._phase_profile_stats.append(profile_stats)
if (self.test_state.test_options.stop_on_first_failure or
conf.stop_on_first_failure):
# Stop Test on first measurement failure
current_phase_result = self.test_state.test_record.phases[-1]
if current_phase_result.outcome == test_record.PhaseOutcome.FAIL:
outcome = phase_executor.PhaseExecutionOutcome(
phase_descriptor.PhaseResult.STOP)
self.logger.error('Stopping test because stop_on_first_failure is True')
if outcome.is_terminal:
if not self._last_outcome:
self._last_outcome = outcome
return _ExecutorReturn.TERMINAL
if outcome.is_fail_subtest:
if not subtest_rec:
raise TestExecutionError(
'INVALID STATE: Phase returned outcome FAIL_SUBTEST when not '
'in subtest.')
subtest_rec.outcome = test_record.SubtestOutcome.FAIL
return _ExecutorReturn.CONTINUE
def _execute_checkpoint(self, checkpoint: phase_branches.Checkpoint,
subtest_rec: Optional[test_record.SubtestRecord],
in_teardown: bool) -> _ExecutorReturn:
if not in_teardown and subtest_rec and subtest_rec.is_fail:
self._phase_exec.skip_checkpoint(checkpoint, subtest_rec)
return _ExecutorReturn.CONTINUE
outcome = self._phase_exec.evaluate_checkpoint(checkpoint, subtest_rec)
if outcome.is_terminal:
if not self._last_outcome:
self._last_outcome = outcome
return _ExecutorReturn.TERMINAL
if outcome.is_fail_subtest:
if not subtest_rec:
raise TestExecutionError(
'INVALID STATE: Phase returned outcome FAIL_SUBTEST when not '
'in subtest.')
subtest_rec.outcome = test_record.SubtestOutcome.FAIL
return _ExecutorReturn.CONTINUE
def _log_sequence(self, phase_sequence, override_message):
message = phase_sequence.name
if override_message:
message = override_message
if message:
self.logger.debug('Executing phase nodes for %s', message)
def _execute_sequence(
self,
phase_sequence: phase_collections.PhaseSequence,
subtest_rec: Optional[test_record.SubtestRecord],
in_teardown: bool,
override_message: Optional[Text] = None) -> _ExecutorReturn:
"""Execute phase sequence.
Args:
phase_sequence: Sequence of phase nodes to run.
subtest_rec: Current subtest record, if any.
in_teardown: Indicates if currently processing a teardown sequence.
override_message: Optional message to override when logging.
Returns:
_ExecutorReturn for how to proceed.
"""
self._log_sequence(phase_sequence, override_message)
if in_teardown:
return self._execute_teardown_sequence(phase_sequence, subtest_rec)
else:
return self._execute_abortable_sequence(phase_sequence, subtest_rec)
def _execute_abortable_sequence(
self, phase_sequence: phase_collections.PhaseSequence,
subtest_rec: Optional[test_record.SubtestRecord]) -> _ExecutorReturn:
"""Execute phase sequence, returning immediately on error or test abort.
Args:
phase_sequence: Sequence of phase nodes to run.
subtest_rec: Current subtest record, if any.
Returns:
_ExecutorReturn for how to proceed.
"""
for node in phase_sequence.nodes:
if self._abort.is_set():
return _ExecutorReturn.TERMINAL
exe_ret = self._execute_node(node, subtest_rec, False)
if exe_ret != _ExecutorReturn.CONTINUE:
return exe_ret
return _ExecutorReturn.CONTINUE
def _execute_teardown_sequence(
self, phase_sequence: phase_collections.PhaseSequence,
subtest_rec: Optional[test_record.SubtestRecord]) -> _ExecutorReturn:
"""Execute all the teardown phases, regardless of errors.
Args:
phase_sequence: Sequence of phase nodes to run.
subtest_rec: Current subtest record, if any.
Returns:
_ExecutorReturn for how to proceed.
"""
ret = _ExecutorReturn.CONTINUE
with self._teardown_phases_lock:
for node in phase_sequence.nodes:
if self._full_abort.is_set():
return _ExecutorReturn.TERMINAL
ret = _more_critical(ret, self._execute_node(node, subtest_rec, True))
return ret
@contextlib.contextmanager
def _subtest_context(
self, subtest: phase_collections.Subtest
) -> Iterator[test_record.SubtestRecord]:
"""Enter a subtest context.
This context tracks the subname and sets up the subtest record to track the
timing.
Args:
subtest: The subtest running during the context.
Yields:
The subtest record for updating the outcome.
"""
self.logger.debug('%s: Starting subtest.', subtest.name)
subtest_rec = test_record.SubtestRecord(
name=subtest.name,
start_time_millis=util.time_millis(),
outcome=test_record.SubtestOutcome.PASS)
yield subtest_rec
subtest_rec.end_time_millis = util.time_millis()
self.test_state.test_record.add_subtest_record(subtest_rec)
def _execute_subtest(self, subtest: phase_collections.Subtest,
outer_subtest_rec: Optional[test_record.SubtestRecord],
in_teardown: bool) -> _ExecutorReturn:
"""Run a subtest node."""
with self._subtest_context(subtest) as subtest_rec:
if outer_subtest_rec and outer_subtest_rec.is_fail:
subtest_rec.outcome = test_record.SubtestOutcome.FAIL
ret = self._execute_sequence(subtest, subtest_rec, in_teardown)
if ret == _ExecutorReturn.TERMINAL:
subtest_rec.outcome = test_record.SubtestOutcome.STOP
self.logger.debug('%s: Subtest stopping the test.', subtest.name)
else:
if subtest_rec.outcome is test_record.SubtestOutcome.FAIL:
self.logger.debug('%s: Subtest failed.', subtest.name)
else:
self.logger.debug('%s: Subtest passed.', subtest.name)
return ret
def _execute_phase_branch(self, branch: phase_branches.BranchSequence,
subtest_rec: Optional[test_record.SubtestRecord],
in_teardown: bool) -> _ExecutorReturn:
branch_message = branch.diag_condition.message
if branch.name:
branch_message = '{}:{}'.format(branch.name, branch_message)
if not in_teardown and subtest_rec and subtest_rec.is_fail:
self.logger.debug('%s: Branch not being run due to failed subtest.',
branch_message)
return _ExecutorReturn.CONTINUE
evaluated_millis = util.time_millis()
if branch.should_run(self.test_state.diagnoses_manager.store):
self.logger.debug('%s: Branch condition met; running phases.',
branch_message)
branch_taken = True
ret = self._execute_sequence(branch, subtest_rec, in_teardown)
else:
self.logger.debug('%s: Branch condition NOT met; not running sequence.',
branch_message)
branch_taken = False
ret = _ExecutorReturn.CONTINUE
branch_rec = test_record.BranchRecord.from_branch(branch, branch_taken,
evaluated_millis)
self.test_state.test_record.add_branch_record(branch_rec)
return ret
def _execute_phase_group(self, group: phase_group.PhaseGroup,
subtest_rec: Optional[test_record.SubtestRecord],
in_teardown: bool) -> _ExecutorReturn:
"""Executes the phases in a phase group.
This will run the phases in the phase group, ensuring if the setup
phases all run without error that the teardown phases will also run, no
matter the errors during the main phases.
This function is recursive. Do not construct phase groups that contain
themselves.
Args:
group: phase_group.PhaseGroup, the phase group to execute.
subtest_rec: Current subtest record, if any.
in_teardown: Indicates if currently processing a teardown sequence.
Returns:
_ExecutorReturn for how to proceed.
"""
message_prefix = ''
if group.name:
self.logger.debug('Entering PhaseGroup %s', group.name)
message_prefix = group.name + ':'
# If in a subtest and it is already failing, the group will not be entered,
# so the teardown phases will need to be skipped.
skip_teardown = subtest_rec is not None and subtest_rec.is_fail
if group.setup:
setup_ret = self._execute_sequence(
group.setup,
subtest_rec,
in_teardown,
override_message=message_prefix + 'setup')
if setup_ret != _ExecutorReturn.CONTINUE:
return setup_ret
if not skip_teardown:
# If the subtest fails during the setup, the group is still not entered,
# so skip the teardown phases here as well.
skip_teardown = (subtest_rec is not None and subtest_rec.is_fail)
if group.main:
main_ret = self._execute_sequence(
group.main,
subtest_rec,
in_teardown,
override_message=message_prefix + 'main')
else:
main_ret = _ExecutorReturn.CONTINUE
if group.teardown:
teardown_ret = self._execute_sequence(
group.teardown,
subtest_rec,
# If the subtest is already failing, record skips during the teardown
# sequence.
not skip_teardown,
override_message=message_prefix + 'teardown')
else:
teardown_ret = _ExecutorReturn.CONTINUE
return _more_critical(main_ret, teardown_ret)
def _execute_node(self, node: phase_nodes.PhaseNode,
subtest_rec: Optional[test_record.SubtestRecord],
in_teardown: bool) -> _ExecutorReturn:
if isinstance(node, phase_collections.Subtest):
return self._execute_subtest(node, subtest_rec, in_teardown)
if isinstance(node, phase_branches.BranchSequence):
return self._execute_phase_branch(node, subtest_rec, in_teardown)
if isinstance(node, phase_collections.PhaseSequence):
return self._execute_sequence(node, subtest_rec, in_teardown)
if isinstance(node, phase_group.PhaseGroup):
return self._execute_phase_group(node, subtest_rec, in_teardown)
if isinstance(node, phase_descriptor.PhaseDescriptor):
return self._execute_phase(node, subtest_rec, in_teardown)
if isinstance(node, phase_branches.Checkpoint):
return self._execute_checkpoint(node, subtest_rec, in_teardown)
self.logger.error('Unhandled node type: %s', node)
return _ExecutorReturn.TERMINAL
def _execute_test_diagnoser(
self, diagnoser: diagnoses_lib.BaseTestDiagnoser) -> None:
try:
self.test_state.diagnoses_manager.execute_test_diagnoser(
diagnoser, self.test_state.test_record)
except Exception: # pylint: disable=broad-except
if self._last_outcome and self._last_outcome.is_terminal:
self.logger.exception(
'Test Diagnoser %s raised an exception, but the test outcome is '
'already terminal; logging additional exception here.',
diagnoser.name)
else:
# Record the equivalent failure outcome and exit early.
self._last_outcome = phase_executor.PhaseExecutionOutcome(
phase_executor.ExceptionInfo(*sys.exc_info()))
def _execute_test_diagnosers(self) -> None:
for diagnoser in self._test_options.diagnosers:
self._execute_test_diagnoser(diagnoser)
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validate or replace the standard gdata authorization token."""
import filecmp
import optparse
import os
import shutil
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib as build_lib
from chromite.lib import operation
MODULE = os.path.splitext(os.path.basename(__file__))[0]
oper = operation.Operation(MODULE)
TOKEN_FILE = os.path.join(os.environ['HOME'], '.gdata_token')
CRED_FILE = os.path.join(os.environ['HOME'], '.gdata_cred.txt')
def _ChrootPathToExternalPath(path):
"""Translate |path| inside chroot to external path to same location."""
if path:
return os.path.join(constants.SOURCE_ROOT,
constants.DEFAULT_CHROOT_DIR,
path.lstrip('/'))
return None
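# Illustrative example (the constant values below are assumptions): with
# constants.SOURCE_ROOT == '/home/user/chromiumos' and
# constants.DEFAULT_CHROOT_DIR == 'chroot', a chroot-internal path such as
# '/home/user/.gdata_token' maps to
# '/home/user/chromiumos/chroot/home/user/.gdata_token'.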
class OutsideChroot(object):
"""Class for managing functionality when run outside chroot."""
def __init__(self, args):
self.args = args
def Run(self):
"""Re-start |args| inside chroot and copy out auth file."""
# Note that enter_chroot (cros_sdk) will automatically copy both
# the token file and the cred file into the chroot, so no need
# to do that here.
# Rerun the same command that launched this run inside the chroot.
cmd = [MODULE] + self.args
result = build_lib.RunCommand(cmd, enter_chroot=True,
print_cmd=False, error_code_ok=True)
if result.returncode != 0:
oper.Die('Token validation failed, exit code was %r.' %
result.returncode)
# Copy the token file back from chroot if different.
chroot_token_file = _ChrootPathToExternalPath(TOKEN_FILE)
if not os.path.exists(chroot_token_file):
oper.Die('No token file generated inside chroot.')
elif (not os.path.exists(TOKEN_FILE) or not
filecmp.cmp(TOKEN_FILE, chroot_token_file)):
oper.Notice('Copying new token file from chroot to %r' % TOKEN_FILE)
shutil.copy2(chroot_token_file, TOKEN_FILE)
else:
oper.Notice('No change in token file.')
class InsideChroot(object):
"""Class for managing functionality when run inside chroot.
Note that some additional imports happen within code in this class
because those imports are only available inside the chroot.
"""
def __init__(self):
self.creds = None # gdata_lib.Creds object.
self.gd_client = None # For interacting with Google Docs.
self.it_client = None # For interacting with Issue Tracker.
def _LoadTokenFile(self):
"""Load existing auth token file."""
if not os.path.exists(TOKEN_FILE):
oper.Warning('No current token file at %r.' % TOKEN_FILE)
return False
# Load token file, if it exists.
self.creds.LoadAuthToken(TOKEN_FILE)
return True
def _SaveTokenFile(self):
"""Save to auth toke file if anything changed."""
self.creds.StoreAuthTokenIfNeeded(TOKEN_FILE)
def _ValidateDocsToken(self):
"""Validate the existing Docs token."""
# pylint: disable=W0404
import gdata.service
if not self.creds.docs_auth_token:
return False
oper.Notice('Attempting to log into Docs using auth token.')
self.gd_client.source = 'Package Status'
self.gd_client.SetClientLoginToken(self.creds.docs_auth_token)
try:
# Try to access generic spreadsheets feed, which will check access.
self.gd_client.GetSpreadsheetsFeed()
# Token accepted. We're done here.
oper.Notice('Docs token validated.')
return True
except gdata.service.RequestError as ex:
reason = ex[0]['reason']
if reason == 'Token expired':
return False
raise
def _GenerateDocsToken(self):
"""Generate a new Docs token from credentials."""
# pylint: disable=W0404
import gdata.service
oper.Warning('Docs token not valid. Will try to generate a new one.')
self.creds.LoadCreds(CRED_FILE)
self.gd_client.email = self.creds.user
self.gd_client.password = self.creds.password
try:
self.gd_client.ProgrammaticLogin()
self.creds.SetDocsAuthToken(self.gd_client.GetClientLoginToken())
oper.Notice('New Docs token generated.')
return True
except gdata.service.BadAuthentication:
oper.Error('Credentials from %r not accepted.'
' Unable to generate new Docs token.' % CRED_FILE)
return False
def _ValidateTrackerToken(self):
"""Validate the existing Tracker token."""
# pylint: disable=W0404
import gdata.client
import gdata.gauth
import gdata.projecthosting.client
if not self.creds.tracker_auth_token:
return False
oper.Notice('Attempting to log into Tracker using auth token.')
self.it_client.source = 'Package Status'
self.it_client.auth_token = gdata.gauth.ClientLoginToken(
self.creds.tracker_auth_token)
try:
# Try to access Tracker Issue #1, which will check access.
query = gdata.projecthosting.client.Query(issue_id='1')
self.it_client.get_issues('chromium-os', query=query)
# Token accepted. We're done here.
oper.Notice('Tracker token validated.')
return True
except gdata.client.Error:
# Exception is gdata.client.Unauthorized in the case of bad token, but
# I do not know what the error is for an expired token so I do not
# want to limit the catching here. All the errors for gdata.client
# functionality extend gdata.client.Error (I do not see one that is
# obviously about an expired token).
return False
def _GenerateTrackerToken(self):
"""Generate a new Tracker token from credentials."""
# pylint: disable=W0404
import gdata.client
oper.Warning('Tracker token not valid. Will try to generate a new one.')
self.creds.LoadCreds(CRED_FILE)
try:
self.it_client.ClientLogin(self.creds.user, self.creds.password,
source='Package Status', service='code',
account_type='GOOGLE')
self.creds.SetTrackerAuthToken(self.it_client.auth_token.token_string)
oper.Notice('New Tracker token generated.')
return True
except gdata.client.BadAuthentication:
oper.Error('Credentials from %r not accepted.'
' Unable to generate new Tracker token.' % CRED_FILE)
return False
def Run(self):
"""Validate existing auth token or generate new one from credentials."""
# pylint: disable=W0404
import chromite.lib.gdata_lib as gdata_lib
import gdata.spreadsheet.service
import gdata.projecthosting.client
self.creds = gdata_lib.Creds()
self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
self.it_client = gdata.projecthosting.client.ProjectHostingClient()
self._LoadTokenFile()
if not self._ValidateTrackerToken():
if not self._GenerateTrackerToken():
oper.Die('Failed to validate or generate Tracker token.')
if not self._ValidateDocsToken():
if not self._GenerateDocsToken():
oper.Die('Failed to validate or generate Docs token.')
self._SaveTokenFile()
def _CreateParser():
usage = 'Usage: %prog'
epilog = ('\n'
'Run outside of chroot to validate the gdata '
'token file at %r or update it if it has expired.\n'
'To update the token file there must be a valid '
'credentials file at %r.\n'
'If run inside chroot the updated token file is '
'still valid but will not be preserved if chroot\n'
'is deleted.\n' %
(TOKEN_FILE, CRED_FILE))
return optparse.OptionParser(usage=usage, epilog=epilog)
def main(argv):
"""Main function."""
# Create a copy of args just to be safe.
argv = list(argv)
# No actual options used, but --help is still supported.
parser = _CreateParser()
(_options, args) = parser.parse_args(argv)
if args:
parser.print_help()
oper.Die('No arguments allowed.')
if build_lib.IsInsideChroot():
InsideChroot().Run()
else:
OutsideChroot(args).Run()
|
|
# -*- coding: utf-8 -*-
"""
Model-Definitions and Signal-Handlers for radonCMS
"""
import os
import re
from django.db import models
from django.db.models.signals import pre_save
from django.db.models.signals import post_delete
from django.template.defaultfilters import filesizeformat
from radoncms.managers import PageManager
from radoncms.signalhandlers import ContentProcessor
from radoncms.signalhandlers import set_unique_name
from radoncms.signalhandlers import set_unique_slug
from radoncms.signalhandlers import delete_media_file
# media-files locations:
IMAGE_UPLOAD_DIR = os.path.join('img', '%Y', '%m')
DOCUMENT_UPLOAD_DIR = os.path.join('doc', '%Y', '%m')
# -----------------------------------------------------------------------------
# Page-Model-Definitions
# -----------------------------------------------------------------------------
class Page(models.Model):
"""
Hierarchical page object.
content: extended reStructuredText
publish: Flag, override start_publish_date and stop_publish_date
nav_title: alternate shorter page title for navigation-links
nav_item: if False this page will not be part of navigation-lists
The methods 'breadcrumbs', 'childpages' and 'siblings' can be called
from the template like page-object-attributes but will return
querysets of other page-objects for navigation purposes.
"""
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
description = models.TextField(blank=True)
processed_content = models.TextField(blank=True)
slug = models.SlugField(max_length=210)
parent_page = models.ForeignKey('self', blank=True, null=True,
related_name='sub_pages')
sibling_id = models.IntegerField(default=1, db_index=True)
publish = models.BooleanField(default=False, db_index=True)
start_publish_date = models.DateField(blank=True, null=True, db_index=True)
stop_publish_date = models.DateField(blank=True, null=True, db_index=True)
nav_title = models.CharField(max_length=100, blank=True)
nav_item = models.BooleanField(default=True, db_index=True)
objects = PageManager()
@models.permalink
def get_absolute_url(self):
"""
Returns the url for this page-object.
"""
return ('slug_page', (), {
'slug': self.slug})
def __unicode__(self):
return self.short_title()
def short_title(self, max_words=6):
"""
Returns the title truncated to max_words
"""
words = self.title.split()
if len(words) > max_words:
title = " ".join(words[:max_words]) + " ..."
else:
title = self.title
return title
def get_nav_title(self):
"""
Returns the nav-title if existent, else returns the regular page
title.
"""
if self.nav_title:
return self.nav_title
return self.__unicode__()
def breadcrumbs(self):
"""
Returns a list of hierarchical page-objects from the homepage
down to this page-object. Because we don't use a 'modified
preorder tree traversal' structure this will hit the database
multiple times (O(n) with n = depth of page in the tree).
"""
breadcrumbs = [self]
parent_page = self.parent_page
while parent_page is not None:
breadcrumbs.append(parent_page)
parent_page = parent_page.parent_page # walk up to the root
breadcrumbs.reverse() # change list to top down
return breadcrumbs
def childpages(self, publish=True):
"""
Returns a queryset of the childpages from this page-object
reduced to the title, nav_title and slug attributes.
"""
childpages = Page.objects.filter_childpages(self, publish=publish)
return childpages.only('title', 'nav_title', 'slug')
def siblings(self, publish=True):
"""
Returns a queryset of the siblingpages from this page-object
reduced to the title, nav_title and slug attributes.
"""
siblings = Page.objects.filter_siblings(self, publish=publish)
return siblings.only('title', 'nav_title', 'slug')
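# Illustrative usage sketch (the page names are assumptions, not fixture data):
# for a page "Contact" whose parent is "About" whose parent is the homepage,
# iterating {{ page.breadcrumbs }} in a template yields the chain
# [<homepage>, <About>, <Contact>] in top-down order, at the cost of one query
# per level of depth; childpages() and siblings() return slimmed querysets
# carrying only title, nav_title and slug.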
# -----------------------------------------------------------------------------
# MediaDB-Model-Definitions:
# -----------------------------------------------------------------------------
class ImageCollection(models.Model):
"""
Class to group Images into Collections. If you have a lot of images,
grouping them makes it easier to manage them in the Django admin backend.
"""
name = models.CharField(max_length=128, unique=True, default='')
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Image(models.Model):
"""
Image objects for inclusion in reStructuredText contents.
The 'name' attribute is used for referencing and has to be unique.
"""
collection = models.ForeignKey('ImageCollection')
img = models.ImageField(upload_to=IMAGE_UPLOAD_DIR)
name = models.CharField(max_length=200, db_index=True, default='')
def __unicode__(self):
return self.name
def get_file_item(self):
"""For file deletion on post_delete."""
return self.img
def get_size(self):
"""For use by django admin."""
return filesizeformat(self.img.size)
def url(self):
return self.img.url
def width(self):
return self.img.width
def height(self):
return self.img.height
def preview(self):
width = self.img.width
height = self.img.height
f = 80.0 / max(width, height)
width *= f
height *= f
return '<img src="{0}" width="{1}" height="{2}" />'.format(
self.url(), width, height)
preview.allow_tags = True
class DocumentCollection(models.Model):
"""
Class to group Documents into Collections. If you have a lot of documents,
grouping them makes it easier to manage them in the Django admin backend.
"""
name = models.CharField(max_length=128, unique=True, default='')
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Document(models.Model):
"""
Document-object for all kinds of files for download.
"""
collection = models.ForeignKey('DocumentCollection')
doc = models.FileField(upload_to=DOCUMENT_UPLOAD_DIR)
name = models.CharField(max_length=200, db_index=True, default='')
def __unicode__(self):
return self.name
def get_file_item(self):
"""For file deletion on post_delete."""
return self.doc
def get_size(self):
"""For use by django admin."""
return filesizeformat(self.doc.size)
def get_name(self):
"""For use by django admin."""
return self.doc.name
# -----------------------------------------------------------------------------
# Signal-Handlers:
# -----------------------------------------------------------------------------
def process_content(sender, **kwargs):
"""
This is a wrapper sending additional Image and Document parameters
to the processor to prevent a circular import.
"""
_ = ContentProcessor(kwargs['instance'], Image, Document)()
# register signal-handlers:
pre_save.connect(process_content, sender=Page)
pre_save.connect(set_unique_slug, sender=Page)
pre_save.connect(set_unique_name, sender=Image)
pre_save.connect(set_unique_name, sender=Document)
post_delete.connect(delete_media_file, sender=Image)
post_delete.connect(delete_media_file, sender=Document)
|
|
import csv
import glob
import os
import codecs
import re
import sys
import traceback
#import random
from collections import Counter
from itertools import chain
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from matteautils.randomdict import RandomDict
from matteautils.base import TSVReader, UnicodeDictReader, printd
import dataset
from dataset import Dataset, SentencePair, MatchWriter
import matteautils.config as conf
import random
try:
stopl = set(stopwords.words('english'))
except LookupError:
nltk.download('stopwords')
nltk.download('ptb')
nltk.download('punkt')
stopl = set(stopwords.words('english'))
stemmer = PorterStemmer()
class NuggetDataset(Dataset):
def __init__(self, path, neg_samples=5):
random.seed(42)
pfiles = os.listdir(path)
printd("Loading dataset")
alldata = []
size = 0
if "train" in pfiles:
for d in ["train", "test", "valid"]:
if d not in pfiles:
continue
dpath = os.path.join(path, d)
nset = NuggetSet(dpath, neg_samples)
size += nset.size
alldata.extend([nset.nuggets, nset.updates])
setattr(self, d, nset.pairs)
else:
nset = NuggetSet(path, neg_samples)
alldata.extend([nset.nuggets, nset.updates])
self.test = nset.pairs
size += nset.size
self.size = size
self.data = Superset(*alldata)
self.writer = nset.writer
if len(self.valid()) != 0:
self._train = SuperList(self.train(), self.valid())
self.train = lambda: self._train
@classmethod
def identify(cls, path):
pfiles = os.listdir(path)
if "train" in pfiles:
pfiles += os.listdir(os.path.join(path, "train"))
return (("nuggets.tsv" in pfiles) or
("gold_iunits.tsv" in pfiles) or
("0001.matches.txt" in pfiles))
def train(self):
yield None
def valid(self):
yield None
def test(self):
yield None
#matches = self.matches
#for nugget in self.nuggets:
# for update in self.updates:
# yield SentencePair(nugget, update, label=matches.match(nugget, update))
def maxShortSentence(self):
ls = Cycle([0, 0])
try:
for dset in self.data:
l = ls.nextitem()
for s in dset:
cl = len(s["wv_tokens"])
if cl > l:
l = cl
ls.setitem(l)
except KeyError, e:
printd(e, -1)
printd(s, -1)
traceback.print_stack()
sys.exit(-1)
return min(ls)
dataset.dtypes["ts"] = NuggetDataset
class Cycle(list):
def nextitem(self):
try:
self._curritem = (self._curritem + 1) % len(self)
except AttributeError:
self._curritem = 0
return self[self._curritem]
def setitem(self, v):
self[self._curritem] = v
class NuggetSet(object):
def __init__(self, path, neg_samples=None):
pfiles = os.listdir(path)
if "nuggets.tsv" in pfiles:
self.nuggets = Nuggets(os.path.join(path, "nuggets.tsv"))
self.updates = Updates(os.path.join(path, "updates_sampled.tsv"))
self.matches = Matches(os.path.join(path, "matches.tsv"))
self.writer = MatchWriter
elif "0001.matches.txt" in pfiles:
self.nuggets = CLNuggets(glob.glob(os.path.join(path, "*.vitalstrings.txt")))
self.updates = CLUpdates(glob.glob(os.path.join(path, "*.summaries.txt")))
self.matches = CLMatches(glob.glob(os.path.join(path, "*.matches.txt")))
self.writer = CLMatchWriter
elif "gold_iunits.tsv" in pfiles:
self.nuggets = MCNuggets(os.path.join(path, "gold_iunits.tsv"))
self.updates = Updates(os.path.join(path, "pooled_iunits.tsv"))
self.matches = Matches(os.path.join(path, "matches.tsv"))
self.writer = MCMatchWriter
self.p = len(self.matches) / (len(self.nuggets) * len(self.updates))
self._len = len(self.nuggets) + len(self.updates)
self.size = self.nuggets.size + self.updates.size
if neg_samples is not None:
self.neg_samples = neg_samples
elif conf.neg_samples:
self.neg_samples = conf.neg_samples
else:
self.neg_samples = 5
# neg_samples is the number of negative samples to use for each positive
# sample (possibly in expectation)
def pairs(self):
try:
return self._pairs
except AttributeError:
pass
pairs = []
self._pairs = pairs
matches = self.matches
nuggets = self.nuggets
updates = self.updates
neg_samples = self.neg_samples
if neg_samples < 0:
for nugget in self.nuggets:
for update in self.updates:
match = matches.match(nugget, update)
if match:
pairs.append(SentencePair(nugget, update, label=match))
else:
pairs.append(SentencePair(nugget, update, label=0))
else:
# Method assumes # matches << #upd*#nugg
for match in matches:
nid = match["nugget_id"]
uid = match["update_id"]
if (nid not in nuggets) or (uid not in updates):
continue
pairs.append(SentencePair(nuggets[nid], updates[uid], label=1.0))
for _ in range(neg_samples):
while True:
rnid, nugg = nuggets.random_item()
ruid, upd = updates.random_item()
matchp = matches.match(rnid, ruid)
if not matchp:
break
pairs.append(SentencePair(nugg, upd, label=0.0))
return pairs
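# Worked example (the numbers are illustrative assumptions): with 1,000 gold
# matches and neg_samples=5, pairs() yields roughly 1,000 positive
# SentencePairs plus 5,000 randomly drawn negatives, instead of materialising
# the full |nuggets| x |updates| grid that neg_samples < 0 would produce.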
class TextFragments(object):
def __iter__(self):
for item in self.data.itervalues():
yield item
def __getitem__(self, key):
return self.data[key]
def __contains__(self, key):
return key in self.data
def random_item(self):
return self.data.random_item()
def random_key(self):
return self.data.random_key()
def random_value(self):
return self.data.random_value()
def __len__(self):
return self.count
def text(self):
res = []
for rid, rec in self.data.iteritems():
res.append(rec["tokens"])
return res
def wv_text(self):
#res = []
for rid, rec in self.data.iteritems():
#res.append(rec["wv_tokens"])
for word in rec["wv_tokens"]:
yield word
#return res
def wv_sentences(self):
for rid, rec in self.data.iteritems():
yield rec["wv_tokens"]
def sentences(self):
for rid, rec in self.data.iteritems():
yield rec["tokens"]
def wv_vocab(self):
try:
return self._wv_vocab
except AttributeError:
pass
res = Counter()
for rid, rec in self.data.iteritems():
res.update(rec["wv_tokens"])
self._wv_vocab = res
return res
def normalize(self, matcher, df):
printd("Normalizing dset")
for rid, rec in self.data.iteritems():
rec["vector"], rec["vector_sum"] = matcher.normalize(rec["vector"], df)
def vectorize(self, wordvec):
for rid, rec in self.data.items():
rec["vector"], rec["wv_tokens"] = wordvec.get_sentvec(rec["tokens"])
class Nuggets(TextFragments):
def __init__(self, filen, vectorize=False):
self.nuggets = RandomDict()
self.vectorizep = vectorize
self.read(filen)
self.data = self.nuggets
def read(self, filen):
count = 0
wordcount = 0
for rec in self.nuggetReader(filen):
toks = tokenize(rec["text"])
if len(toks) == 0:
continue
#rec["tokens"] = stem(toks)
rec["tokens"] = toks
#if self.vectorizep:
# rec["vector"], rec["wv_tokens"] = wordvec.get_sentvec(toks)
self.nuggets[rec["id"]] = rec
count += 1
wordcount += len(toks)
self.count = count
self._len = count
self.size = wordcount
def nuggetReader(self, filen):
with open(filen) as nh:
for rec in UnicodeDictReader(nh, delimiter="\t", quoting=csv.QUOTE_NONE):
rec["text"] = rec["nugget_text"]
rec["id"] = rec["nugget_id"]
yield(rec)
class TSNuggets(Nuggets):
def nuggetReader(self, filen):
with open(filen) as nh:
for rec in UnicodeDictReader(nh, delimiter="\t", quoting=csv.QUOTE_NONE):
rec["text"] = rec["nugget_text"]
rec["id"] = rec["nugget_id"]
yield(rec)
class CLNuggets(Nuggets):
def nuggetReader(self, filen):
vsfields = ["nugget_id", "impt", "length", "dep", "nugget_text"]
for nf in glob.glob(filen):
qid = "1C2-E-" + os.path.basename(nf).replace(".vitalstrings.txt", "")
with open(nf) as nh:
for rec in UnicodeDictReader(nh, fieldnames=vsfields, delimiter="\t",
quoting=csv.QUOTE_NONE):
rec["query_id"] = qid
rec["text"] = rec["nugget_text"]
rec["id"] = rec["nugget_id"]
yield(rec)
class MCNuggets(Nuggets):
def nuggetReader(self, filen):
with open(filen) as nh:
for rec in UnicodeDictReader(nh, delimiter="\t", quoting=csv.QUOTE_NONE):
rec["text"] = rec["vs_text"]
rec["id"] = rec["vs_id"]
yield(rec)
class Updates(TextFragments):
def __init__(self, filen, vectorize=False):
self.updates = RandomDict()
self.vectorizep = vectorize
self.read(filen)
self.data = self.updates
def read(self, filen):
count = 0
wordcount = 0
for rec in self.updateReader(filen):
if rec["duplicate_id"] != "NULL":
continue
toks = tokenize(rec["text"])
if len(toks) == 0:
continue
#rec["tokens"] = stem(toks)
rec["tokens"] = toks
#if self.vectorizep:
# rec["vector"], rec["wv_tokens"] = wordvec.get_sentvec(toks)
#rec["vec_sum"] = np.sum(rec["vector"], axis=0)
self.updates[rec["id"]] = rec
count += 1
wordcount += len(toks)
self.count = count
self._len = count
self.size = wordcount
def updateReader(self, filen):
with open(filen) as nh:
for rec in UnicodeDictReader(nh, delimiter="\t", quoting=csv.QUOTE_NONE):
rec["text"] = rec["update_text"]
rec["id"] = rec["update_id"]
yield(rec)
class CLUpdates(Updates):
def updateReader(self, filen):
ufields = ["query_id", "type", "update_text"]
for uf in glob.glob(filen):
update_id = os.path.basename(uf).replace(".tsv", "")
with open(uf) as uh:
uh.readline() # sysdesc
for rec in UnicodeDictReader(uh, fieldnames=ufields, delimiter="\t",
quoting=csv.QUOTE_NONE):
if rec["type"] != "OUT":
continue
rec["update_id"] = update_id
rec["id"] = update_id
rec["text"] = rec["update_text"]
rec["duplicate_id"] = "NULL"
yield(rec)
class Matches(object):
def __init__(self, filen):
matches = dict()
self.matches = matches
if filen is None:
return
count = 0
for rec in self.reader(filen):
matches[rec["nugget_id"] + rec["update_id"]] = rec
count += 1
self.count = count
def reader(self, filen):
for rec in TSVReader(filen):
yield rec
def __getitem__(self, key):
return self.matches[key]
def __contains__(self, key):
return key in self.matches
def __len__(self):
return self.count
def match(self, nid, uid):
if not isinstance(nid, basestring):
nid = nid["id"]
if not isinstance(uid, basestring):
uid = uid["id"]
return 1 if nid + uid in self.matches else 0
def __iter__(self):
return iter(self.matches.itervalues())
class CLMatches(Matches):
def reader(self, files):
mfields = ["update_id", "nugget_id", "start", "end"]
for filen in files:
for rec in TSVReader(filen, fieldnames=mfields):
yield rec
#class MatchWriter(object):
#
# def __init__(self, sf):
# self.sh = codecs.open(sf or os.devnull, 'w', 'utf-8')
#
# def __enter__(self):
# if self.sh:
# self.sh.__enter__()
# self.writeheader()
# return self
#
# def __exit__(self, ctype, value, traceback):
# if self.sh:
# self.sh.__exit__(ctype, value, traceback)
#
# def writeheader(self):
# print >>self.sh, "\t".join(("QueryID", "UpdateID", "NuggetID", "Start",
# "End", "AutoP", "Score", "Update_Text",
# "Nugget_Text"))
#
# def write(self, pair, match):
# nugget = pair.s1
# update = pair.s2
# qid = nugget["query_id"]
# print >>self.sh, "\t".join((qid, update["id"], nugget["id"],
# str(match.start), str(match.end),
# str(match.autop), "%g" % match.score,
# update["text"], nugget["text"]))
class Superset(object):
def __init__(self, *items):
self.items = items
self.size = sum([x.size for x in self.items])
def __len__(self):
return sum([len(x) for x in self.items])
def __iter__(self):
return iter(self.items)
#for dset in self.items:
# for item in dset:
# yield item
class SuperList(object):
def __init__(self, *items):
self.items = items
def __len__(self):
return sum([len(x) for x in self.items])
def __iter__(self):
return iter(chain(*self.items))
class CLMatchWriter(MatchWriter):
def __init__(self, sf):
self.sh = None
self.qid = None
self.sd = sf
if sf and not os.path.exists(sf):
os.makedirs(sf)
def writeheader(self):
pass
def write(self, pair, match):
nugget = pair.s1
update = pair.s2
qid = nugget["query_id"]
if qid != self.qid:
if self.sh:
self.sh.__exit__(None, None, None)
if self.sd:
sf = os.path.join(self.sd, "%s.matches.txt" % (qid.replace("1C2-E-", "")))
else:
sf = os.devnull
self.sh = codecs.open(sf, 'w', 'utf-8')
self.qid = qid
print >>self.sh, "\t".join((update["id"], nugget["id"],
str(match.start), str(match.end),
"%g" % match.score,
update["text"], nugget["text"]))
class MCMatchWriter(MatchWriter):
def writeheader(self):
print >>self.sh, "\t".join(("query_id", "vs_id", "update_id",
"update_source", "vs_start", "vs_end",
"score", "update_text", "vs_text"))
def write(self, pair, match):
nugget = pair.s1
update = pair.s2
qid = nugget["query_id"]
print >>self.sh, "\t".join((qid, nugget["id"], update["id"],
update["update_source"],
str(match.start), str(match.end),
"%g" % match.score,
update["text"], nugget["text"]))
def tokenize(t):
# separate numbers from other text
# TODO(mattea): should account for scientific-notation (e.g. 1e3)
t = re.sub(r"([^\s\d])(?=\d)", r"\1 ", t)
t = re.sub(r"(\d)(?=[^\s\d])", r"\1 ", t)
try:
toks = word_tokenize(t)
except Exception:
toks = t.strip().split()
# Remove nonword characters and lowercase everything
toks = [re.sub(r"[^\w\.\,\-]", "", x.lower()) for x in toks]
#return [x for x in toks if x not in stopl]
return toks
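# A minimal behaviour sketch (the input string is an illustrative assumption):
# the two substitutions split digits away from adjacent characters before
# word_tokenize runs, so for example
#
#   tokenize("Deploy 3updates to room101 by 5pm")
#   -> ['deploy', '3', 'updates', 'to', 'room', '101', 'by', '5', 'pm']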
def stem(ts):
return [stemmer.stem(x) for x in ts]
|
|
# Copyright 2017 University of Rome La Sapienza and Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jul 25, 2017
@author: Emerald Ryan
"""
from __future__ import division, print_function, absolute_import
import os
import lxml.etree as ET
from CodeInterfaceBaseClass import CodeInterfaceBase
class Neutrino(CodeInterfaceBase):
"""
Provides code to interface RAVEN to Neutrino code
The name of this class represents the type in the RAVEN input file
e.g.
<Models>
<Code name="myName" subType="Neutrino">
...
</Code>
...
</Models>
"""
def generateCommand(self, inputFiles, executable, clargs=None,fargs=None,preExec=None):
"""
See base class. Collects all the clargs and the executable to produce the command-line call.
Returns tuple of commands and base file name for run.
Commands are a list of tuples, indicating parallel/serial and the execution command to use.
@ In, inputFiles, list, List of input files (the length of the list depends on the number of inputs that have
been added in the Step that is running this code)
@ In, executable, string, executable name with absolute path (e.g. /home/path_to_executable/code.exe)
@ In, clargs, dict, optional, dictionary containing the command-line flags the user can specify in the input
(e.g. under the node < Code >< clargstype =0 input0arg =0 i0extension =0 .inp0/ >< /Code >)
@ In, fargs, dict, optional, a dictionary containing the auxiliary input file variables the user can specify
in the input (e.g. under the node < Code >< clargstype =0 input0arg =0 aux0extension =0 .aux0/ >< /Code >)
@ Out, returnCommand, tuple, tuple containing the generated command. returnCommand[0] is the command to
run the code (string), returnCommand[1] is the name of the output root
"""
found = False
# Find the first file in the inputFiles that is an XML, which is what we need to work with.
for index, inputFile in enumerate(inputFiles):
if self._isValidInput(inputFile):
found = True
break
if not found:
raise Exception('No correct input file has been found. Got: '+' '.join(inputFiles))
#Determine the path to the validated input file
path = inputFiles[index].getAbsFile()
#Creates the output file that saves information that is outputted to the command prompt
#The output file name of the Neutrino results
outputfile = 'results'
#Creates run command tuple (['executionType','execution command'], output file root)
#The path to the Neutrino executable is specified in the RAVEN input file as the executable
# since it must change directories to run
executablePath = executable.replace("Neutrino.exe","")
returnCommand = [('serial','cd ' + executablePath + ' && ' + executable + ' --nogui --file ' + str(path) \
+ ' --run')], outputfile
return returnCommand
def _isValidInput(self, inputFile):
"""
Check if an input file is a Neutrino input file.
@ In, inputFile, string, the file name to be checked
@ Out, valid, bool, 'True' if an input file has an extension of '.nescene', otherwise 'False'.
"""
valid = False
if inputFile.getExt() in ('nescene',):
valid = True
return valid
def getInputExtension(self):
"""
Return a tuple of possible file extensions for a Neutrino input file (i.e., .nescene).
@ In, None
@ Out, validExtensions, tuple, tuple of valid extensions
"""
validExtensions = ('nescene',)
return validExtensions
def createNewInput(self, currentInputFiles, oriInputFiles, samplerType, **Kwargs):
"""
Generate a new Neutrino input file (XML format) from the original, changing parameters
as specified in Kwargs['SampledVars']
@ In , currentInputFiles, list, list of current input files (input files of this iteration)
@ In , oriInputFiles, list, list of the original input files
@ In , samplerType, string, Sampler type (e.g. MonteCarlo, Adaptive, etc. see manual Samplers section)
@ In , Kwargs, dictionary, kwarded dictionary of parameters. In this dictionary there is another
dictionary called "SampledVars" where RAVEN stores the variables that got sampled
(e.g. Kwargs['SampledVars'] => {'var1':10,'var2':40})
@ Out, newInputFiles, list, list of the new input files (modified and unmodified)
"""
# Look for the correct input file
found = False
for index, inputFile in enumerate(currentInputFiles):
if self._isValidInput(inputFile):
found = True
break
if not found:
raise Exception('No correct input file has been found. Got: '+' '.join(oriInputFiles))
originalPath = currentInputFiles[index].getAbsFile()
originalPath = os.path.abspath(originalPath)
# Since the input file is XML we can load and edit it directly using etree
# Load the XML into a tree:
tree = ET.parse(originalPath, ET.XMLParser(encoding='utf-8'))
# get the root node
root = tree.getroot()
# grep the variables that got sampled
varDict = Kwargs['SampledVars']
# Go through sampled variables
for var in varDict:
#Search for the SPH solver properties
#NIISphSolver_1 name may need to be changed based on Neutrino input file
#Can add other properties to change beside the solver properties
for element in root.findall('./properties/Scene/NIISphSolver_1/'):
#Search for the Radius property
if element.get('name') == 'ParticleSize':
#Set the radius value to the sampled value
element.set('val',str(varDict[var]))
#Change where the measurements and the output data is stored in the input file to match RAVEN location
#Search for the Base properties
for elementBase in root.findall('./properties/Base/'):
#Search for the SceneFilePath property
if elementBase.get('name') == 'SceneFilePath':
#Set the SceneFilePath
elementBase.set('val', str(originalPath))
#Search for the SaveDir property
if elementBase.get('name') == 'SaveDir':
#Create and set SaveDir
#NeutrinoInput.nescene needs to be changed to the Neutrino input file name
savePath = originalPath.replace("NeutrinoInput.nescene","",1)
elementBase.set('val',str(savePath))
if elementBase.get('name') == 'CacheDir':
#Create and set CacheDir
#NeutrinoInput.nescene needs to be changed to the Neutrino input file name
cachePath = originalPath.replace("NeutrinoInput.nescene","",1)
elementBase.set('val',str(cachePath))
#Search for the Measurement field properties
#MeasurementField_1 name may need to be changed based on Neutrino input file
for elementMeas in root.findall('./properties/Scene/MeasurementField_1/'):
#Search for the exportPath property
if elementMeas.get('name') == 'exportPath':
#Create and set the exportPath
#NeutrinoInput.nescene needs to be changed to the Neutrino input file name
exportPath = originalPath.replace("NeutrinoInput.nescene","",1)
exportPath = exportPath + r"\Measurements\results.csv"
elementMeas.set('val',str(exportPath))
# Now we can re-write the input file
tree.write(originalPath)
return currentInputFiles
def finalizeCodeOutput(self, command, output, workingDir):
"""
Called by RAVEN to modify output files (if needed) so that they are in a proper form.
In this case the CSV written by Neutrino's measurement field is read and rewritten, with a
header row, into a CSV file that RAVEN can parse.
@ In, command, string, the command used to run the ended job
@ In, output, string, the Output name root
@ In, workingDir, string, current working dir
@ Out, newOutputRoot, string, present in case the root of the output file gets changed in this method.
"""
# create full path to the outputfile
# NeutrinoInput needs to be the name of the Neutrino Input file
# Name of results file name needs to be the same as in the createNewInput function
outputPath = os.path.join(workingDir, "NeutrinoInput", "Measurements", "results.csv")
#Change the output path so RAVEN can read the output
newOutputPath = os.path.join(workingDir, output)
# check that the output file exists
'''if not os.path.exists(outputPath):
print('Results file does not exist. OK if during test.')
return newOutputPath'''
# open original output file (the working directory is provided)
outputFile = open(outputPath,"r+")
#Open the new output file so the results can be written to it and put in the form for RAVEN to read
resultsFile = open(newOutputPath + ".csv", 'w')
lines = outputFile.readlines()
#Needed for RAVEN to read output
#These need to match RAVEN input file output names
resultsFile.write('time,result\n')
#Write Neutrino results to a new file for RAVEN
for line in lines:
resultsFile.write(line)
resultsFile.close()
outputFile.close()
return newOutputPath
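# A minimal, self-contained sketch (not part of the RAVEN interface above) of the lxml
# pattern used in createNewInput: find a named property element and overwrite its 'val'
# attribute. The XML fragment below is a hypothetical, heavily simplified stand-in for a
# Neutrino .nescene file and the sampled value is made up.
if __name__ == '__main__':
  exampleRoot = ET.fromstring(
    b"<scene><properties><Scene><NIISphSolver_1>"
    b"<prop name='ParticleSize' val='0.1'/>"
    b"</NIISphSolver_1></Scene></properties></scene>")
  for element in exampleRoot.findall('./properties/Scene/NIISphSolver_1/'):
    if element.get('name') == 'ParticleSize':
      element.set('val', str(0.05))  # in createNewInput this value comes from Kwargs['SampledVars']
  print(ET.tostring(exampleRoot, pretty_print=True).decode())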
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from keystone.common import config
from keystone.openstack.common.gettextutils import _ # flake8: noqa
from keystone.openstack.common import log
from keystone.openstack.common import strutils
CONF = config.CONF
LOG = log.getLogger(__name__)
# Tests use this to make exception message format errors fatal
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(Exception):
"""Base error class.
Child classes should define an HTTP status code, title, and a
message_format.
"""
code = None
title = None
message_format = None
def __init__(self, message=None, **kwargs):
try:
message = self._build_message(message, **kwargs)
except KeyError:
# if you see this warning in your logs, please raise a bug report
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
LOG.warning(_('missing exception kwargs (programmer error)'))
message = self.message_format
super(Error, self).__init__(message)
def _build_message(self, message, **kwargs):
"""Builds and returns an exception message.
:raises: KeyError given insufficient kwargs
"""
if not message:
try:
message = self.message_format % kwargs
except UnicodeDecodeError:
try:
kwargs = dict([(k, strutils.safe_decode(v)) for k, v in
six.iteritems(kwargs)])
except UnicodeDecodeError:
# NOTE(jamielennox): This is the complete failure case
# at least by showing the template we have some idea
# of where the error is coming from
message = self.message_format
else:
message = self.message_format % kwargs
return message
class ValidationError(Error):
message_format = _("Expecting to find %(attribute)s in %(target)s."
" The server could not comply with the request"
" since it is either malformed or otherwise"
" incorrect. The client is assumed to be in error.")
code = 400
title = 'Bad Request'
class ValidationTimeStampError(Error):
message_format = _("Timestamp not in expected format."
" The server could not comply with the request"
" since it is either malformed or otherwise"
" incorrect. The client is assumed to be in error.")
code = 400
title = 'Bad Request'
class StringLengthExceeded(ValidationError):
message_format = _("String length exceeded.The length of"
" string '%(string)s' exceeded the limit"
" of column %(type)s(CHAR(%(length)d)).")
class ValidationSizeError(Error):
message_format = _("Request attribute %(attribute)s must be"
" less than or equal to %(size)i. The server"
" could not comply with the request because"
" the attribute size is invalid (too large)."
" The client is assumed to be in error.")
code = 400
title = 'Bad Request'
class PKITokenExpected(Error):
message_format = _('The certificates you requested are not available. '
'It is likely that this server does not use PKI tokens; '
'otherwise this is the result of misconfiguration.')
code = 403
title = 'Cannot retrieve certificates'
class SecurityError(Error):
"""Avoids exposing details of security failures, unless in debug mode."""
def _build_message(self, message, **kwargs):
"""Only returns detailed messages in debug mode."""
if CONF.debug:
return message or self.message_format % kwargs
else:
return self.message_format % kwargs
class Unauthorized(SecurityError):
message_format = _("The request you have made requires authentication.")
code = 401
title = 'Unauthorized'
class AuthPluginException(Unauthorized):
message_format = _("Authentication plugin error.")
def __init__(self, *args, **kwargs):
super(AuthPluginException, self).__init__(*args, **kwargs)
self.authentication = {}
class MissingGroups(Unauthorized):
message_format = _("Unable to find valid groups while using "
"mapping %(mapping_id)s")
class AuthMethodNotSupported(AuthPluginException):
message_format = _("Attempted to authenticate with an unsupported method.")
def __init__(self, *args, **kwargs):
super(AuthMethodNotSupported, self).__init__(*args, **kwargs)
self.authentication = {'methods': CONF.auth.methods}
class AdditionalAuthRequired(AuthPluginException):
message_format = _("Additional authentications steps required.")
def __init__(self, auth_response=None, **kwargs):
super(AdditionalAuthRequired, self).__init__(message=None, **kwargs)
self.authentication = auth_response
class Forbidden(SecurityError):
message_format = _("You are not authorized to perform the"
" requested action.")
code = 403
title = 'Forbidden'
class ForbiddenAction(Forbidden):
message_format = _("You are not authorized to perform the"
" requested action, %(action)s.")
class ImmutableAttributeError(Forbidden):
message_format = _("Could not change immutable attribute %(attribute)s"
" in target %(target)s")
class NotFound(Error):
message_format = _("Could not find, %(target)s.")
code = 404
title = 'Not Found'
class EndpointNotFound(NotFound):
message_format = _("Could not find endpoint, %(endpoint_id)s.")
class MetadataNotFound(NotFound):
"""(dolph): metadata is not a user-facing concept,
so this exception should not be exposed
"""
message_format = _("An unhandled exception has occurred:"
" Could not find metadata.")
class PolicyNotFound(NotFound):
message_format = _("Could not find policy, %(policy_id)s.")
class RoleNotFound(NotFound):
message_format = _("Could not find role, %(role_id)s.")
class RegionNotFound(NotFound):
message_format = _("Could not find region, %(region_id)s.")
class ServiceNotFound(NotFound):
message_format = _("Could not find service, %(service_id)s.")
class DomainNotFound(NotFound):
message_format = _("Could not find domain, %(domain_id)s.")
class ProjectNotFound(NotFound):
message_format = _("Could not find project, %(project_id)s.")
class TokenNotFound(NotFound):
message_format = _("Could not find token, %(token_id)s.")
class UserNotFound(NotFound):
message_format = _("Could not find user, %(user_id)s.")
class GroupNotFound(NotFound):
message_format = _("Could not find group, %(group_id)s.")
class MappingNotFound(NotFound):
message_format = _("Could not find mapping, %(mapping_id)s.")
class TrustNotFound(NotFound):
message_format = _("Could not find trust, %(trust_id)s.")
class TrustUseLimitReached(Forbidden):
message_format = _("No remaining uses for trust %(trust_id)s.")
class CredentialNotFound(NotFound):
message_format = _("Could not find credential, %(credential_id)s.")
class VersionNotFound(NotFound):
message_format = _("Could not find version, %(version)s.")
class IdentityProviderNotFound(NotFound):
message_format = _("Could not find IdentityProvider, %(idp_id)s.")
class FederatedProtocolNotFound(NotFound):
message_format = _("Could not find federated protocol %(protocol_id)s for"
" IdentityProvider, %(idp_id)s")
class Conflict(Error):
message_format = _("Conflict occurred attempting to store %(type)s."
" %(details)s")
code = 409
title = 'Conflict'
class RequestTooLarge(Error):
message_format = _("Request is too large.")
code = 413
title = 'Request is too large.'
class UnexpectedError(SecurityError):
"""Avoids exposing details of failures, unless in debug mode."""
_message_format = _("An unexpected error prevented the server "
"from fulfilling your request.")
debug_message_format = _("An unexpected error prevented the server "
"from fulfilling your request. %(exception)s")
@property
def message_format(self):
"""Return the generic message format string unless debug is enabled."""
if CONF.debug:
return self.debug_message_format
return self._message_format
def _build_message(self, message, **kwargs):
if CONF.debug and 'exception' not in kwargs:
# Ensure that exception has a value to be extra defensive for
# substitutions and make sure the exception doesn't raise an
# exception.
kwargs['exception'] = ''
return super(UnexpectedError, self)._build_message(message, **kwargs)
code = 500
title = 'Internal Server Error'
class TrustConsumeMaximumAttempt(UnexpectedError):
debug_message_format = _("Unable to consume trust %(trust_id)s, unable to "
"acquire lock.")
class CertificateFilesUnavailable(UnexpectedError):
debug_message_format = _("Expected signing certificates are not available "
"on the server. Please check Keystone "
"configuration.")
class MalformedEndpoint(UnexpectedError):
debug_message_format = _("Malformed endpoint URL (%(endpoint)s),"
" see ERROR log for details.")
class MappedGroupNotFound(UnexpectedError):
debug_message_format = _("Group %(group_id)s returned by mapping "
"%(mapping_id)s was not found in the backend.")
class NotImplemented(Error):
message_format = _("The action you have requested has not"
" been implemented.")
code = 501
title = 'Not Implemented'
class Gone(Error):
message_format = _("The service you have requested is no"
" longer available on this server.")
code = 410
title = 'Gone'
class ConfigFileNotFound(UnexpectedError):
debug_message_format = _("The Keystone configuration file %(config_file)s "
"could not be found.")
class MigrationNotProvided(Exception):
def __init__(self, mod_name, path):
super(MigrationNotProvided, self).__init__(_(
"%(mod_name)s doesn't provide database migrations. The migration"
" repository path at %(path)s doesn't exist or isn't a directory."
) % {'mod_name': mod_name, 'path': path})
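# A short, self-contained sketch (not part of keystone) showing how the classes above
# build their messages: keyword arguments supplied at raise time are interpolated into
# message_format by Error._build_message. The project id used here is made up.
def _example_error_message():
    try:
        raise ProjectNotFound(project_id='8c5f4f2b1dce4ab8a2f4460f3d9c31aa')
    except ProjectNotFound as exc:
        return six.text_type(exc)  # 'Could not find project, 8c5f4f2b1dce4ab8a2f4460f3d9c31aa.'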
|
|
"""Tests for module gramcore.data.images"""
import os
import numpy
from PIL import Image, ImageStat
from nose.tools import assert_equal, raises
from gramcore.data import images
def setup():
"""Create image fixtures for test_load_*
To create an image PIL requires size (width, height in pixels) and mode
('L', 'RGB', etc). The background color is set by default to black
(value == 0). Color values in RGB images are stored in (R, G, B) order.
"""
img = Image.new('RGB', (10, 20))
img.putpixel((5, 10), (0, 255, 0))
img.save('green-dot.tif')
img.save('green-dot.jpg')
img.save('green-dot.png')
def teardown():
"""Delete fixtures of test_save_* outputs"""
os.remove('green-dot.tif')
os.remove('green-dot.jpg')
os.remove('green-dot.png')
def test_fromarray_grey():
"""Coversion from array to greyscale image
Checks for correct shape, value assignment and type conversion.
In general width == columns == xx' and height == rows == yy'. A 2D array
will be converted to an image of mode '1', 'L' or 'F'. If the array has a
shape of (20, 10) the resulting image will have size (10, 20).
.. warning::
Converting from array of floats to 'L' image will reduce accuracy. 'F'
images are usually not recognized from viewers and create problems
with image stats. Notice below that only rounding ensures assertions.
"""
arr = numpy.zeros((20, 10), dtype='float')
arr[10, 5] = 249.34
parameters = {'data': [arr]}
img = images.fromarray(parameters).convert('L')
stats = ImageStat.Stat(img)
assert_equal(img.size, (10, 20))
assert_equal(img.getpixel((5, 10)), round(arr[10, 5]))
assert_equal(stats.sum[0], round(arr.sum()))
@raises(TypeError)
def test_fromarray_rgb_fail():
"""Fail to covert array to RGB image, PIL doesn't support it"""
arr = numpy.zeros((20, 10, 3), dtype='float')
parameters = {'data': [arr]}
images.fromarray(parameters).convert('RGB')
def test_load_tif():
"""Load tif fixture and check pixel color
Using assert_array_equal which is the suitable solution for arrays.
"""
parameters = {'path': 'green-dot.tif'}
img = images.load(parameters)
numpy.testing.assert_array_equal(img[10, 5], [0, 255, 0])
def test_load_jpg():
"""Load jpg fixture
Can't check for color here, because compression changes it.
"""
parameters = {'path': 'green-dot.jpg'}
images.load(parameters)
def test_load_png():
"""Load png fixture and check pixel color
Using assert_array_equal which is the suitable solution for arrays.
"""
parameters = {'path': 'green-dot.png'}
img = images.load(parameters)
numpy.testing.assert_array_equal(img[10, 5], [0, 255, 0])
@raises(TypeError)
def test_load_fail():
"""Fail to load file with unkown extension"""
parameters = {'path': 'foo.bar'}
images.load(parameters)
def test_save_tif():
"""Save image to tif"""
img = Image.new('RGB', (10, 20))
parameters = {'path': 'green-dot.tif', 'data': [img]}
assert images.save(parameters)
def test_save_jpg():
"""Save image to jpg"""
img = Image.new('RGB', (10, 20))
parameters = {'path': 'green-dot.jpg', 'data': [img]}
assert images.save(parameters)
def test_save_png():
"""Save image to png"""
img = Image.new('RGB', (10, 20))
parameters = {'path': 'green-dot.png', 'data': [img]}
assert images.save(parameters)
@raises(TypeError)
def test_save_fail():
"""Fail to save file with unknown extension"""
img = Image.new('RGB', (10, 20))
parameters = {'path': 'foo.bar', 'data': [img]}
images.save(parameters)
def test_synth_positions():
"""Check synth positions with a large background and small patches"""
background = Image.new('RGB', (30, 20))
patch_1 = Image.new('RGB', (10, 10))
patch_2 = Image.new('RGB', (20, 5))
parameters = {'data': [background, patch_1, patch_2]}
positions = images.synth_positions(parameters)
assert_equal(positions[0][0], 0)
assert_equal(positions[0][1], 5)
assert_equal(positions[1][0], 10)
assert_equal(positions[1][1], 5)
@raises(ValueError)
def test_synth_positions_small_width():
"""Fail in synth_positions because of small backgound width"""
background = Image.new('RGB', (20, 20))
patch_1 = Image.new('RGB', (10, 20))
patch_2 = Image.new('RGB', (11, 20))
parameters = {'data': [background, patch_1, patch_2]}
positions = images.synth_positions(parameters)
@raises(ValueError)
def test_synth_positions_small_height():
"""Fail in synth_positions because of small backgound height"""
background = Image.new('RGB', (20, 20))
patch_1 = Image.new('RGB', (10, 21))
patch_2 = Image.new('RGB', (10, 21))
parameters = {'data': [background, patch_1, patch_2]}
positions = images.synth_positions(parameters)
def test_synthetic():
"""Create a synthetic image and check for size and color assignment
The first two patches will overlap and the last will be cropped. Notice
that overlapping patches overwrite each other and that patches partially
outside the background are simply cropped rather than raising an error.
"""
background = Image.new('RGB', (100, 50), (125, 125, 125))
red = Image.new('RGB', (10, 5), (255, 0, 0))
green = Image.new('RGB', (5, 5), (0, 255, 0))
blue = Image.new('RGB', (20, 5), (0, 0, 255))
positions = [
[0, 0],
[9, 5],
[99, 20]
]
parameters = {
'data': [background, red, green, blue],
'positions': positions
}
synth = images.synthetic(parameters)
assert_equal(synth.size, (100, 50))
assert_equal(synth.getpixel((0, 0)), (255, 0, 0, 255))
# if there was no overwrite of overlapping patches, this should be:
# assert_equal(synth.getpixel((9, 5)), (255, 255, 0, 255))
# but since green is pasted last it is:
assert_equal(synth.getpixel((9, 5)), (0, 255, 0, 255))
def test_synthetic_auto():
"""Create a synthetic image with automatic positions"""
background = Image.new('RGB', (7, 3), (125, 125, 125))
red = Image.new('RGB', (1, 1), (255, 0, 0))
green = Image.new('RGB', (1, 1), (0, 255, 0))
blue = Image.new('RGB', (1, 1), (0, 0, 255))
parameters = {
'data': [background, red, green, blue],
'positions': 'auto'
}
synth = images.synthetic(parameters)
assert_equal(synth.size, (7, 3))
assert_equal(synth.getpixel((1, 1)), (255, 0, 0, 255))
assert_equal(synth.getpixel((3, 1)), (0, 255, 0, 255))
assert_equal(synth.getpixel((5, 1)), (0, 0, 255, 255))
@raises(ValueError)
def test_synthetic_less_positions():
"""Fail to create synthetic image, less positions than patches"""
background = Image.new('RGB', (100, 50))
patch = Image.new('RGB', (10, 10))
positions = []
parameters = {
'data': [background, patch],
'positions': positions
}
images.synthetic(parameters)
@raises(ValueError)
def test_synthetic_more_positions():
"""Fail to create synthetic image, more positions than patches"""
background = Image.new('RGB', (100, 50))
patch = Image.new('RGB', (10, 10))
positions = [
[5, 5],
[9, 5]
]
parameters = {
'data': [background, patch],
'positions': positions
}
images.synthetic(parameters)
def test_tiled():
"""Create a tiled image and check for size and color assignment"""
size = [25, 25]
img = Image.new('RGB', (10, 10))
img.putpixel((5, 5), (0, 255, 0))
parameters = {'data': [img], 'size': size}
tiled = images.tiled(parameters)
assert_equal(tiled.size, tuple(size))
assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))
assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))
|
|
'''
Copyright (c) 2018 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
iDevice_backups.py
------------
This plugin will scan for iPad/iPhone backups and export all files and databases found.
It does not recreate the file and folder structure for the exported files; there are
already many tools available to do this. Just point them to the exported folder at
<YourOutputFolder>/Exports/IDEVICEBACKUPS/<USER>_<BACKUP_UUID>
'''
import io
import logging
import os
import time
from plugins.helpers.common import *
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
__Plugin_Name = "IDEVICEBACKUPS"
__Plugin_Friendly_Name = "iDevice Backup Info"
__Plugin_Version = "1.0"
__Plugin_Description = "Reads and exports iPhone/iPad backup databases"
__Plugin_Author = "Jack Farley, Yogesh Khatri"
__Plugin_Author_Email = "jack.farley@mymail.champlain.edu, yogesh@swiftforensics.com"
__Plugin_Modes = "MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = 'Reads iDevice backup databases found at /Users/<USER>/Library/Application Support/MobileSync/Backup. '\
'Provide the path to this folder as input for this plugin'
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
class iDeviceBackup:
def __init__(self, Device_Name, Product_Name, Product_Model, Phone_Num, iOS_Vers, Backup_Start, Backup_End,
Last_Backup_Date, Passcode_Set, Encrypted, GUID, ICCID, IMEI, MEID, SN, Full_Backup, Version, iTunes_Vers, apps, user, source):
self.Device_Name = Device_Name
self.Product_Name = Product_Name
self.Product_Model = Product_Model
self.Phone_Num = Phone_Num
self.iOS_Vers = iOS_Vers
self.Backup_Start = Backup_Start
self.Backup_End = Backup_End
self.Last_Backup_Date = Last_Backup_Date
self.Passcode_Set = Passcode_Set
self.Encrypted = Encrypted
self.GUID = GUID
self.ICCID = ICCID
self.IMEI = IMEI
self.MEID = MEID
self.SN = SN
self.Full_Backup = Full_Backup
self.Version = Version
self.iTunes_Vers = iTunes_Vers
self.apps = apps
self.user = user
self.source = source
def PrintAll(output_params, source_path, backups):
backup_labels = [('Device_Name',DataType.TEXT),('Product_Name',DataType.TEXT),('Product_Model',DataType.TEXT),
('Phone_Num',DataType.TEXT),('iOS_Vers',DataType.TEXT), ('Backup_Start',DataType.DATE),
('Backup_End',DataType.DATE),('Last_Backup_Date',DataType.DATE),('Passcode_Set',DataType.TEXT),
('Encrypted',DataType.TEXT),('GUID',DataType.TEXT),('ICCID',DataType.TEXT),
('IMEI', DataType.TEXT), ('MEID', DataType.TEXT),('SN', DataType.TEXT),
('Full_Backup', DataType.TEXT), ('Version', DataType.TEXT), ('iTunes_Vers', DataType.TEXT),
('Apps_on_device',DataType.TEXT),('User', DataType.TEXT),('Source',DataType.TEXT)
]
backup_list = []
for bkp in backups:
bkps_item = [ bkp.Device_Name, bkp.Product_Name, bkp.Product_Model, bkp.Phone_Num,
bkp.iOS_Vers, bkp.Backup_Start, bkp.Backup_End, bkp.Last_Backup_Date, bkp.Passcode_Set,
bkp.Encrypted, bkp.GUID, bkp.ICCID, bkp.IMEI, bkp.MEID,
bkp.SN, bkp.Full_Backup, bkp.Version, bkp.iTunes_Vers,
bkp.apps, bkp.user, bkp.source
]
backup_list.append(bkps_item)
WriteList("iDevice Backups", "iDevice_Backups", backup_list, backup_labels, output_params, source_path)
def BackupFinder(mac_info, source, user):
'''Finds backup folders and returns them in a list'''
paths = []
backup_folders = mac_info.ListItemsInFolder(source, EntryType.FOLDERS, True)
if len(backup_folders) > 0:
log.info(str(len(backup_folders)) + " iDevice Backups Found for user " + user)
for folder in backup_folders:
full_folder_path = source + '/' + folder['name']
# Check for empty folders, sometimes they are empty!
backup_folder_files = mac_info.ListItemsInFolder(full_folder_path, EntryType.FOLDERS, False)
if len(backup_folder_files) > 0:
paths.append(full_folder_path)
return paths
def ReadBackups(mac_info, export_folder_path, info_plist_path, status_plist_path, manifest_plist_path, user, backups, source):
'''Captures relevant data in Info.plist, Status.plist, Manifest.plist'''
success, info_plist, error = mac_info.ReadPlist(info_plist_path)
if not success:
info_plist = {}
log.error('Error reading Info.plist - ' + error)
success, status_plist, error = mac_info.ReadPlist(status_plist_path)
if not success:
status_plist = {}
log.error('Error reading Status.plist - ' + error)
success, manifest_plist, error = mac_info.ReadPlist(manifest_plist_path)
if not success:
manifest_plist = {}
log.error('Error reading Manifest.plist - ' + error)
ReadDataFromPlists(info_plist, status_plist, manifest_plist, user, backups, source)
# Try exporting files
base_folder = os.path.dirname(info_plist_path)
log.debug("Let's try to export files now from {}".format(base_folder))
slash = '\\' if (os.name == 'nt') else '/'
files_exported = 0
time_processing_started = time.time()
folders = mac_info.ListItemsInFolder(base_folder, EntryType.FOLDERS, False)
for folder in folders:
path = base_folder + '/' + folder['name']
files = mac_info.ListItemsInFolder(path, EntryType.FILES, False)
for item in files:
mac_info.ExportFile(path + '/' + item['name'], export_folder_path + slash + folder['name'], '', False)
files_exported += 1
time_processing_ended = time.time()
run_time = time_processing_ended - time_processing_started
log.debug("export time for {} files = {}".format(time.strftime('%H:%M:%S', time.gmtime(run_time)), files_exported))
def ReadDataFromPlists(info_plist, status_plist, manifest_plist, user, backups, source):
lockdown = manifest_plist.get('Lockdown', {})
deviceName = info_plist.get('Device Name', '')
bkps = iDeviceBackup(
deviceName,
info_plist.get('Product Name', ''),
info_plist.get('Product Type', ''),
info_plist.get('Phone Number', ''),
lockdown.get('ProductVersion', ''),
manifest_plist.get('Date', ''),
status_plist.get('Date', ''),
info_plist.get('Last Backup Date', ''),
manifest_plist.get('WasPasscodeSet', ''),
manifest_plist.get('IsEncrypted', ''),
info_plist.get('GUID', ''),
info_plist.get('ICCID', ''),
info_plist.get('IMEI', ''),
info_plist.get('MEID', ''),
info_plist.get('Serial Number', ''),
status_plist.get('IsFullBackup', ''),
status_plist.get('Version', ''),
info_plist.get('iTunes Version', ''),
",".join(ReadApps(info_plist.get('Applications', {}))),
user,
source)
backups.append(bkps)
def ReadApps(applications_dict):
'''Gets application names only'''
#TODO- Get all app details
apps = []
for k, v in applications_dict.items():
plist_blob = v.get('iTunesMetadata', None)
if plist_blob:
f = io.BytesIO(plist_blob)
success, plist, error = CommonFunctions.ReadPlist(f)
if success:
app_name = plist.get('itemName', None)
if app_name:
apps.append(app_name)
else:
log.error(f"Failed to read iTunesMetadata embedded plist for {k}. Error was {error}")
return apps
def ReadBackupsStandalone(info_plist_path, status_plist_path, manifest_plist_path, backups, source):
success, info_plist, error = CommonFunctions.ReadPlist(info_plist_path)
if not success:
log.error("Failed to read Info.plist from path {}. {}".format(info_plist_path, error))
info_plist = {}
success, status_plist, error = CommonFunctions.ReadPlist(status_plist_path)
if not success:
log.error("Failed to read Status.plist from path {}. {}".format(status_plist_path, error))
status_plist = {}
success, manifest_plist, error = CommonFunctions.ReadPlist(manifest_plist_path)
if not success:
log.error("Failed to read Manifest.plist from path {}. {}".format(manifest_plist_path, error))
manifest_plist = {}
ReadDataFromPlists(info_plist, status_plist, manifest_plist, '', backups, source)
def Plugin_Start(mac_info):
'''Main Entry point function for plugin'''
backupPath = '{}/Library/Application Support/MobileSync/Backup'
processed_paths = []
backups = []
for user in mac_info.users:
user_name = user.user_name
if user.home_dir == '/private/var':
continue # Optimization, nothing should be here!
elif user.home_dir == '/private/var/root':
user_name = 'root' # Some other users use the same root folder; we will list all such users as 'root', as there is no way to tell them apart
if user.home_dir in processed_paths: continue # Avoid processing same folder twice (some users have same folder! (Eg: root & daemon))
processed_paths.append(user.home_dir)
userBackupPath = backupPath.format(user.home_dir)
if mac_info.IsValidFolderPath(userBackupPath):
deviceFolders = BackupFinder(mac_info, userBackupPath, user_name)
for folder in deviceFolders:
info_plist_path = folder + '/Info.plist'
status_plist_path = folder + '/Status.plist'
manifest_plist_path = folder + '/Manifest.plist'
manifest_db_path1 = folder + '/Manifest.mbdb'
manifest_db_path2 = folder + '/Manifest.db' # ios 9 and above
has_info_plist = False
has_status_plist = False
has_manifest_plist = False
export_folder_path = os.path.join(__Plugin_Name, user_name + "_" + os.path.basename(folder)) # Should create folder EXPORT/IDEVICEBACKUPS/user_BackupUUID/
if mac_info.IsValidFilePath(info_plist_path):
has_info_plist = True
mac_info.ExportFile(info_plist_path, export_folder_path, '', False)
else:
log.error("Failed to find Info.plist in {}".format(folder))
if mac_info.IsValidFilePath(status_plist_path):
has_status_plist = True
mac_info.ExportFile(status_plist_path, export_folder_path, '', False)
else:
log.error("Failed to find Status.plist in {}".format(folder))
if mac_info.IsValidFilePath(manifest_plist_path):
has_manifest_plist = True
mac_info.ExportFile(manifest_plist_path, export_folder_path, '', False)
else:
log.error("Failed to find Manifest.plist in {}".format(folder))
if mac_info.IsValidFilePath(manifest_db_path1):
has_manifest_plist = True
mac_info.ExportFile(manifest_db_path1, export_folder_path, '', False)
elif mac_info.IsValidFilePath(manifest_db_path2):
has_manifest_plist = True
mac_info.ExportFile(manifest_db_path2, export_folder_path, '', False)
if has_manifest_plist and has_info_plist:
ReadBackups(mac_info, export_folder_path, info_plist_path, status_plist_path, manifest_plist_path, user.user_name, backups, folder)
if backups:
PrintAll(mac_info.output_params, '', backups)
else:
log.info('No iDevice backups found')
def Plugin_Start_Standalone(input_files_list, output_params):
log.info("Module Started as standalone")
backups = []
inputFolder = str(input_files_list[0])
if os.path.isdir(inputFolder):
log.debug("Input folder passed was: " + inputFolder)
info_plist_path = os.path.join(inputFolder, 'Info.plist')
status_plist_path = os.path.join(inputFolder, 'Status.plist')
manifest_plist_path = os.path.join(inputFolder, 'Manifest.plist')
ReadBackupsStandalone(info_plist_path, status_plist_path, manifest_plist_path, backups, inputFolder)
if backups:
PrintAll(output_params, '', backups)
else:
log.info('No iDevice backups found')
else:
log.error("Input must be a folder containing backup plists and data")
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!")
|
|
import json
import sqlite3
import sys
import re
import dateutil.parser
import datetime
# API Reference is at
# http://developer.trademe.co.nz/api-reference/search-methods/residential-search/
def create_table_residential(dbPath):
conn = sqlite3.connect(dbPath)
with conn:
cur = conn.cursor()
#cur.execute("DROP TABLE IF EXISTS residential_listings")
cur.execute("CREATE TABLE IF NOT EXISTS residential_listings (ListingId INTEGER,Title VARCHAR(100),Category VARCHAR(50),StartPrice REAL,StartDate INTEGER,EndDate INTEGER,IsFeatured INTEGER,HasGallery INTEGER,IsBold INTEGER,IsHighlighted INTEGER,AsAt INTEGER,CategoryPath VARCHAR(100),PictureHref VARCHAR(100),RegionId INTEGER,Region VARCHAR(50),SuburbId INTEGER,Suburb VARCHAR(100),ReserveState INTEGER,IsClassified VARCHAR(5),Latitude REAL,Longitude REAL,Northing INTEGER,Easting INTEGER,Accuracy INTEGER,PriceDisplay VARCHAR(100),Address VARCHAR(100),District VARCHAR(100),AgencyReference VARCHAR(10),LandArea INTEGER,Bathrooms INTEGER,Bedrooms INTEGER,ListingGroup VARCHAR(100),Parking VARCHAR(100),PropertyType VARCHAR(50),PropertyId VARCHAR(50),DistrictId INTEGER,AgencyId INTEGER,AgencyName VARCHAR(255),AgencyPhoneNumber VARCHAR(20),IsRealEstateAgnecy VARCHAR(6),IsLicensedPropertyAgency VARCHAR(6), UNIQUE(ListingId) ON CONFLICT REPLACE)")
def create_table_rental(dbPath):
conn = sqlite3.connect(dbPath)
with conn:
cur = conn.cursor()
#cur.execute("DROP TABLE IF EXISTS rental_listings")
cur.execute("CREATE TABLE IF NOT EXISTS rental_listings (ListingId INTEGER,Title VARCHAR(100),Category VARCHAR(50),RentPerWeek INTEGER,SmokersOkay VARCHAR(5),StartDate INTEGER,EndDate INTEGER,IsFeatured INTEGER,HasGallery INTEGER,IsBold INTEGER,IsHighlighted INTEGER,AsAt INTEGER,CategoryPath VARCHAR(100),PictureHref VARCHAR(100),RegionId INTEGER,Region VARCHAR(50),SuburbId INTEGER,Suburb VARCHAR(100),ReserveState INTEGER,IsClassified VARCHAR(5),Latitude REAL,Longitude REAL,Northing INTEGER,Easting INTEGER,Accuracy INTEGER,PriceDisplay VARCHAR(100),Address VARCHAR(100),District VARCHAR(100),AgencyReference VARCHAR(10),LandArea INTEGER,Bathrooms INTEGER,Bedrooms INTEGER,ListingGroup VARCHAR(100),Parking VARCHAR(100),PropertyType VARCHAR(50),PropertyId VARCHAR(50),DistrictId INTEGER,AgencyId INTEGER,AgencyName VARCHAR(255),AgencyPhoneNumber VARCHAR(20),IsRealEstateAgnecy VARCHAR(6),IsLicensedPropertyAgency VARCHAR(6), UNIQUE(ListingId) ON CONFLICT REPLACE)")
def create_table_schools(dbPath):
conn = sqlite3.connect(dbPath)
with conn:
cur = conn.cursor()
#cur.execute("DROP TABLE IF EXISTS schools")
cur.execute("CREATE TABLE IF NOT EXISTS schools (Id INTEGER, Name VARCHAR(255), Decile INTEGER, UNIQUE(ListingId) ON CONFLICT REPLACE)")
def create_table_residential_listings_individual(dbPath):
conn = sqlite3.connect(dbPath)
with conn:
cur = conn.cursor()
#cur.execute("DROP TABLE IF EXISTS residential_listings_individual")
cur.execute("CREATE TABLE IF NOT EXISTS residential_individual_listings (ListingId INTEGER, Body VARCHAR(10000), ViewCount INTEGER, UNIQUE(ListingId) ON CONFLICT REPLACE)")
def insert_individual_listing(listing, dbPath):
#print json.dumps(listing, sort_keys=True, indent=4)
listing_tuple_all = []
listing_tuple = (listing[u'ListingId'],listing[u'Body'],listing[u'ViewCount'])
listing_tuple_all.append(listing_tuple)
conn = sqlite3.connect(dbPath) #conn = sqlite3.connect(":memory:")
with conn:
cur = conn.cursor()
#cur.executemany("INSERT INTO residential_individual_listings VALUES(?,?,?)", listing_tuple_all)
cur.execute("INSERT INTO residential_individual_listings VALUES(?,?,?)", listing_tuple)
def insert_residential_json(residential_json_pages, dbPath):
property_tuple_all = []
num_records = 0
#for x in residential_json_pages: #[:1]:
for i in range(0,len(residential_json_pages)):
x = residential_json_pages[i]
print 'starting page ' + str(i)
#if i == 1:
#print 'x on following line'
#print type(x)
#print x[u'Page']
#print x[u'PageSize']
for y in x[u'List']: #[:20]:
ListingId = y.get('ListingId') if y.get('ListingId') else -99
Title = y.get('Title') if y.get('Title') else ''
Category = y.get('Category') if y.get('Category') else ''
StartPrice = y.get('StartPrice') if y.get('StartPrice') else -99
if (y.get('StartDate')):
StartDate = re.sub(r'\/Date\(([0-9]*)\)\/',r'd\1',y.get('StartDate'))
#StartDate = datetime.datetime.fromtimestamp(int(StartDate)/1000.0)
else:
StartDate = -99
if (y.get('EndDate')):
EndDate = re.sub(r'\/Date\(([0-9]*)\)\/',r'd\1',y.get('EndDate'))
#EndDate = datetime.datetime.fromtimestamp(int(EndDate)/1000.0)
else:
EndDate = -99
IsFeatured = y.get('IsFeatured') if y.get('IsFeatured') else -99
HasGallery = y.get('HasGallery') if y.get('HasGallery') else -99
IsBold = y.get('IsBold') if y.get('IsBold') else -99
IsHighlighted = y.get('IsHighlighted') if y.get('IsHighlighted') else -99
AsAt = y.get('AsAt') if y.get('AsAt') else -99
CategoryPath = y.get('CategoryPath') if y.get('CategoryPath') else ''
PictureHref = y.get('PictureHref') if y.get('PictureHref') else ''
RegionId = y.get('RegionId') if y.get('RegionId') else -99
Region = y.get('Region') if y.get('Region') else ''
SuburbId = y.get('SuburbId') if y.get('SuburbId') else -99
Suburb = y.get('Suburb') if y.get('Suburb') else ''
ReserveState = y.get('ReserveState') if y.get('ReserveState') else -99
IsClassified = y.get('IsClassified') if y.get('IsClassified') else ''
Latitude = y.get('GeographicLocation').get('Latitude') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Latitude')) else -99
Longitude = y.get('GeographicLocation').get('Longitude') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Longitude')) else -99
Northing = y.get('GeographicLocation').get('Northing') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Northing')) else -99
Easting = y.get('GeographicLocation').get('Easting') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Easting')) else -99
Accuracy = y.get('GeographicLocation').get('Accuracy') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Accuracy')) else -99
PriceDisplay = y.get('PriceDisplay') if y.get('PriceDisplay') else ''
if ('$' in y.get('PriceDisplay')):
StartPrice = re.sub(r'.*\$(.*)',r'\1',y.get('PriceDisplay'))
StartPrice = re.sub(r',',r'',StartPrice) # re-use StartPrice field as it seems unused
Address = y.get('Address') if y.get('Address') else ''
District = y.get('District') if y.get('District') else ''
AgencyReference = y.get('AgencyReference') if y.get('AgencyReference') else ''
LandArea = y.get('LandArea') if y.get('LandArea') else -99
Bathrooms = y.get('Bathrooms') if y.get('Bathrooms') else -99
Bedrooms = y.get('Bedrooms') if y.get('Bedrooms') else -99
ListingGroup = y.get('ListingGroup') if y.get('ListingGroup') else ''
Parking = y.get('Parking') if y.get('Parking') else ''
PropertyType = y.get('PropertyType') if y.get('PropertyType') else ''
PropertyId = y.get('PropertyId') if y.get('PropertyId') else ''
DistrictId = y.get('DistrictId') if y.get('DistrictId') else -99
AgencyId = y.get('Agency').get('Id') if (y.get('Agency') and y.get('Agency').get('Id')) else -99
AgencyName = y.get('Agency').get('Name') if (y.get('Agency') and y.get('Agency').get('Name')) else ''
AgencyPhoneNumber = y.get('Agency').get('PhoneNumber') if (y.get('Agency') and y.get('Agency').get('PhoneNumber')) else ''
IsRealEstateAgency = y.get('Agency').get('IsRealEstateAgency') if (y.get('Agency') and y.get('Agency').get('IsRealEstateAgency')) else ''
IsLicensedPropertyAgency = y.get('Agency').get('IsLicensedPropertyAgency') if (y.get('Agency') and y.get('Agency').get('IsLicensedPropertyAgency')) else ''
property_tuple = (ListingId,Title,Category,StartPrice,StartDate,EndDate,IsFeatured,HasGallery,IsBold,IsHighlighted,AsAt,CategoryPath,PictureHref,RegionId,Region,SuburbId,Suburb,ReserveState,IsClassified,Latitude,Longitude,Northing,Easting,Accuracy,PriceDisplay,Address,District,AgencyReference,LandArea,Bathrooms,Bedrooms,ListingGroup,Parking,PropertyType,PropertyId,DistrictId,AgencyId,AgencyName,AgencyPhoneNumber,IsRealEstateAgency,IsLicensedPropertyAgency)
num_records += 1
property_tuple_all.append(property_tuple)
print 'finished page'
print 'number of records by counter ' + str(num_records)
print 'length of property_tuple ' + str(len(property_tuple_all))
print 'starting database entry'
conn = sqlite3.connect(dbPath) #conn = sqlite3.connect(":memory:")
with conn:
cur = conn.cursor()
#cur.execute("INSERT INTO Cars VALUES(1,'Audi',52642)")
cur.executemany("INSERT INTO residential_listings VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", property_tuple_all)
print 'finished residential database entry'
def insert_rental_json(rental_json_pages, dbPath):
property_tuple_all = []
num_records = 0
#for x in rental_json_pages: #[:1]:
for i in range(0,len(rental_json_pages)):
x = rental_json_pages[i]
print 'starting page ' + str(i)
#if i == 1:
#print 'x on following line'
#print type(x)
#print x[u'Page']
#print x[u'PageSize']
for y in x[u'List']: #[:20]:
ListingId = y.get('ListingId') if y.get('ListingId') else -99
Title = y.get('Title') if y.get('Title') else ''
Category = y.get('Category') if y.get('Category') else ''
RentPerWeek = y.get('RentPerWeek') if y.get('RentPerWeek') else -99
SmokersOkay = y.get('SmokersOkay') if y.get('SmokersOkay') else ''
if (y.get('StartDate')):
StartDate = re.sub(r'\/Date\(([0-9]*)\)\/',r'd\1',y.get('StartDate'))
#StartDate = datetime.datetime.fromtimestamp(int(StartDate)/1000.0)
else:
StartDate = -99
if (y.get('EndDate')):
EndDate = re.sub(r'\/Date\(([0-9]*)\)\/',r'd\1',y.get('EndDate'))
#EndDate = datetime.datetime.fromtimestamp(int(EndDate)/1000.0)
else:
EndDate = -99
IsFeatured = y.get('IsFeatured') if y.get('IsFeatured') else -99
HasGallery = y.get('HasGallery') if y.get('HasGallery') else -99
IsBold = y.get('IsBold') if y.get('IsBold') else -99
IsHighlighted = y.get('IsHighlighted') if y.get('IsHighlighted') else -99
AsAt = y.get('AsAt') if y.get('AsAt') else -99
CategoryPath = y.get('CategoryPath') if y.get('CategoryPath') else ''
PictureHref = y.get('PictureHref') if y.get('PictureHref') else ''
RegionId = y.get('RegionId') if y.get('RegionId') else -99
Region = y.get('Region') if y.get('Region') else ''
SuburbId = y.get('SuburbId') if y.get('SuburbId') else -99
Suburb = y.get('Suburb') if y.get('Suburb') else ''
ReserveState = y.get('ReserveState') if y.get('ReserveState') else -99
IsClassified = y.get('IsClassified') if y.get('IsClassified') else ''
Latitude = y.get('GeographicLocation').get('Latitude') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Latitude')) else -99
Longitude = y.get('GeographicLocation').get('Longitude') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Longitude')) else -99
Northing = y.get('GeographicLocation').get('Northing') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Northing')) else -99
Easting = y.get('GeographicLocation').get('Easting') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Easting')) else -99
Accuracy = y.get('GeographicLocation').get('Accuracy') if (y.get('GeographicLocation') and y.get('GeographicLocation').get('Accuracy')) else -99
PriceDisplay = y.get('PriceDisplay') if y.get('PriceDisplay') else ''
Address = y.get('Address') if y.get('Address') else ''
District = y.get('District') if y.get('District') else ''
AgencyReference = y.get('AgencyReference') if y.get('AgencyReference') else ''
LandArea = y.get('LandArea') if y.get('LandArea') else -99
Bathrooms = y.get('Bathrooms') if y.get('Bathrooms') else -99
Bedrooms = y.get('Bedrooms') if y.get('Bedrooms') else -99
ListingGroup = y.get('ListingGroup') if y.get('ListingGroup') else ''
Parking = y.get('Parking') if y.get('Parking') else ''
PropertyType = y.get('PropertyType') if y.get('PropertyType') else ''
PropertyId = y.get('PropertyId') if y.get('PropertyId') else ''
DistrictId = y.get('DistrictId') if y.get('DistrictId') else -99
AgencyId = y.get('Agency').get('Id') if (y.get('Agency') and y.get('Agency').get('Id')) else -99
AgencyName = y.get('Agency').get('Name') if (y.get('Agency') and y.get('Agency').get('Name')) else ''
AgencyPhoneNumber = y.get('Agency').get('PhoneNumber') if (y.get('Agency') and y.get('Agency').get('PhoneNumber')) else ''
IsRealEstateAgency = y.get('Agency').get('IsRealEstateAgency') if (y.get('Agency') and y.get('Agency').get('IsRealEstateAgency')) else ''
IsLicensedPropertyAgency = y.get('Agency').get('IsLicensedPropertyAgency') if (y.get('Agency') and y.get('Agency').get('IsLicensedPropertyAgency')) else ''
property_tuple = (ListingId,Title,Category,RentPerWeek,SmokersOkay,StartDate,EndDate,IsFeatured,HasGallery,IsBold,IsHighlighted,AsAt,CategoryPath,PictureHref,RegionId,Region,SuburbId,Suburb,ReserveState,IsClassified,Latitude,Longitude,Northing,Easting,Accuracy,PriceDisplay,Address,District,AgencyReference,LandArea,Bathrooms,Bedrooms,ListingGroup,Parking,PropertyType,PropertyId,DistrictId,AgencyId,AgencyName,AgencyPhoneNumber,IsRealEstateAgency,IsLicensedPropertyAgency)
num_records += 1
property_tuple_all.append(property_tuple)
print 'finished page'
print 'number of records by counter ' + str(num_records)
print 'length of property_tuple ' + str(len(property_tuple_all))
print 'starting database entry'
conn = sqlite3.connect(dbPath) #conn = sqlite3.connect(":memory:")
with conn:
cur = conn.cursor()
#cur.execute("INSERT INTO Cars VALUES(1,'Audi',52642)")
cur.executemany("INSERT INTO rental_listings VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", property_tuple_all)
print 'finished rental database entry'
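# A brief sketch of the '/Date(...)/' handling used above: the Trade Me API returns dates
# such as '/Date(1388534400000)/' (a made-up example value); the substitution keeps the
# millisecond timestamp prefixed with a 'd' marker, and the commented-out fromtimestamp()
# lines show how it could be turned into a datetime instead.
#
#   re.sub(r'\/Date\(([0-9]*)\)\/', r'd\1', '/Date(1388534400000)/')   # -> 'd1388534400000'
#   datetime.datetime.fromtimestamp(1388534400000 / 1000.0)            # local-time datetime early in 2014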
|
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import hashlib
import os
import stat
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
BUFFER_SIZE = 16 * 1024
class ToolWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_wallet_tool()
def bitcoin_wallet_process(self, *args):
binary = self.config["environment"]["BUILDDIR"] + '/src/bitcoin-wallet' + self.config["environment"]["EXEEXT"]
args = ['-datadir={}'.format(self.nodes[0].datadir), '-chain=%s' % self.chain] + list(args)
return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def assert_raises_tool_error(self, error, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
def assert_tool_output(self, output, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(stderr, '')
assert_equal(stdout, output)
assert_equal(p.poll(), 0)
def wallet_shasum(self):
h = hashlib.sha1()
mv = memoryview(bytearray(BUFFER_SIZE))
with open(self.wallet_path, 'rb', buffering=0) as f:
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def wallet_timestamp(self):
return os.path.getmtime(self.wallet_path)
def wallet_permissions(self):
return oct(os.lstat(self.wallet_path).st_mode)[-3:]
def log_wallet_timestamp_comparison(self, old, new):
result = 'unchanged' if new == old else 'increased!'
self.log.debug('Wallet file timestamp {}'.format(result))
def test_invalid_tool_commands_and_args(self):
self.log.info('Testing that various invalid commands raise with specific error messages')
self.assert_raises_tool_error('Invalid command: foo', 'foo')
# `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`.
self.assert_raises_tool_error('Invalid command: help', 'help')
self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
locked_dir = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets")
error = 'Error initializing wallet database environment "{}"!'.format(locked_dir)
if self.options.descriptors:
error = "SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
self.assert_raises_tool_error(
error,
'-wallet=' + self.default_wallet_name,
'info',
)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "nonexistent.dat")
self.assert_raises_tool_error("Failed to load database path '{}'. Path does not exist.".format(path), '-wallet=nonexistent.dat', 'info')
def test_tool_wallet_info(self):
# Stop the node to close the wallet to call the info command.
self.stop_node(0)
self.log.info('Calling wallet tool info, testing output')
#
# TODO: Wallet tool info should work with wallet file permissions set to
# read-only without raising:
# "Error loading wallet.dat. Is wallet being used by another process?"
# The following lines should be uncommented and the tests still succeed:
#
# self.log.debug('Setting wallet file permissions to 400 (read-only)')
# os.chmod(self.wallet_path, stat.S_IRUSR)
# assert self.wallet_permissions() in ['400', '666'] # Sanity check. 666 because Appveyor.
# shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
if self.options.descriptors:
out = textwrap.dedent('''\
Wallet info
===========
Name: default_wallet
Format: sqlite
Descriptors: yes
Encrypted: no
HD (hd seed available): yes
Keypool Size: 6
Transactions: 0
Address Book: 1
''')
else:
out = textwrap.dedent('''\
Wallet info
===========
Name: \
Format: bdb
Descriptors: no
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 0
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=' + self.default_wallet_name, 'info')
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
self.log.debug('Setting wallet file permissions back to 600 (read/write)')
os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR)
assert self.wallet_permissions() in ['600', '666'] # Sanity check. 666 because Appveyor.
#
# TODO: Wallet tool info should not write to the wallet file.
# The following lines should be uncommented and the tests still succeed:
#
# assert_equal(timestamp_before, timestamp_after)
# shasum_after = self.wallet_shasum()
# assert_equal(shasum_before, shasum_after)
# self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_info_after_transaction(self):
"""
Mutate the wallet with a transaction to verify that the info command
output changes accordingly.
"""
self.start_node(0)
self.log.info('Generating transaction to mutate wallet')
self.nodes[0].generate(1)
self.stop_node(0)
self.log.info('Calling wallet tool info after generating a transaction, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
if self.options.descriptors:
out = textwrap.dedent('''\
Wallet info
===========
Name: default_wallet
Format: sqlite
Descriptors: yes
Encrypted: no
HD (hd seed available): yes
Keypool Size: 6
Transactions: 1
Address Book: 1
''')
else:
out = textwrap.dedent('''\
Wallet info
===========
Name: \
Format: bdb
Descriptors: no
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 1
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=' + self.default_wallet_name, 'info')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
#
# TODO: Wallet tool info should not write to the wallet file.
# This assertion should be uncommented and succeed:
# assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_create_on_existing_wallet(self):
self.log.info('Calling wallet tool create on an existing wallet, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling create: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Topping up keypool...
Wallet info
===========
Name: foo
Format: bdb
Descriptors: no
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2000
Transactions: 0
Address Book: 0
''')
self.assert_tool_output(out, '-wallet=foo', 'create')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling create: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_getwalletinfo_on_different_wallet(self):
self.log.info('Starting node with arg -wallet=foo')
self.start_node(0, ['-nowallet', '-wallet=foo'])
self.log.info('Calling getwalletinfo on a different wallet ("foo"), testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling getwalletinfo: {}'.format(timestamp_before))
out = self.nodes[0].getwalletinfo()
self.stop_node(0)
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling getwalletinfo: {}'.format(timestamp_after))
assert_equal(0, out['txcount'])
assert_equal(1000, out['keypoolsize'])
assert_equal(1000, out['keypoolsize_hd_internal'])
assert_equal(True, 'hdseedid' in out)
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_after, shasum_before)
self.log.debug('Wallet file shasum unchanged\n')
def test_salvage(self):
# TODO: Check salvage actually salvages and doesn't break things. https://github.com/bitcoin/bitcoin/issues/7463
self.log.info('Check salvage')
self.start_node(0)
self.nodes[0].createwallet("salvage")
self.stop_node(0)
self.assert_tool_output('', '-wallet=salvage', 'salvage')
def run_test(self):
self.wallet_path = os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename)
self.test_invalid_tool_commands_and_args()
# Warning: The following tests are order-dependent.
self.test_tool_wallet_info()
self.test_tool_wallet_info_after_transaction()
if not self.options.descriptors:
# TODO: Wallet tool needs more create options at which point these can be enabled.
self.test_tool_wallet_create_on_existing_wallet()
self.test_getwalletinfo_on_different_wallet()
# Salvage is a legacy wallet only thing
self.test_salvage()
if __name__ == '__main__':
ToolWalletTest().main()
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: MQTT discovery provider
A discovery packet contains a JSON representation of an ImportEndpoint bean.
This module depends on the paho-mqtt package (ex-mosquitto), provided by the
Eclipse Foundation: see http://www.eclipse.org/paho
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
# MQTT client
import pelix.misc.mqtt_client
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Validate, Property, Invalidate
# Pelix & Remote services
from pelix.remote.edef_io import EDEFWriter, EDEFReader
import pelix.constants as constants
import pelix.remote
import pelix.remote.beans as beans
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
EVENT_ADD = "add"
EVENT_UPDATE = "update"
EVENT_REMOVE = "remove"
EVENT_LOST = "lost"
EVENT_DISCOVER = "discover"
ENDPOINT_EVENTS = (EVENT_ADD, EVENT_UPDATE, EVENT_REMOVE)
# ------------------------------------------------------------------------------
@ComponentFactory(pelix.remote.FACTORY_DISCOVERY_MQTT)
@Provides(pelix.remote.SERVICE_EXPORT_ENDPOINT_LISTENER, "_controller")
@Requires("_dispatcher", pelix.remote.SERVICE_DISPATCHER)
@Requires("_registry", pelix.remote.SERVICE_REGISTRY)
@Property("_host", "mqtt.host", "localhost")
@Property("_port", "mqtt.port", 1883)
@Property("_prefix", "topic.prefix", "pelix/{appid}/remote-services")
@Property("_appid", "application.id", None)
class MqttDiscovery(object):
"""
Remote Service discovery provider based on MQTT
"""
def __init__(self):
"""
Sets up members
"""
# Imports registry
self._registry = None
# Exports registry
self._dispatcher = None
# Service controller
self._controller = False
# Framework UID
self._framework_uid = None
# MQTT server properties
self._host = "localhost"
self._port = 1883
# MQTT topic Properties
self._prefix = ""
self._appid = None
# MQTT client
self.__mqtt = None
# Real prefix
self._real_prefix = ""
@Validate
def _validate(self, context):
"""
Component validated
"""
# Format the topic prefix
self._real_prefix = self._prefix.format(appid=self._appid or "")
# Avoid double slashes
self._real_prefix = self._real_prefix.replace("//", "/")
# Get the framework UID
self._framework_uid = context.get_property(constants.FRAMEWORK_UID)
# Create the MQTT client
self.__mqtt = pelix.misc.mqtt_client.MqttClient()
# Customize callbacks
self.__mqtt.on_connect = self.__on_connect
self.__mqtt.on_disconnect = self.__on_disconnect
self.__mqtt.on_message = self.__on_message
# Prepare the will packet
self.__mqtt.set_will(self._make_topic(EVENT_LOST),
self._framework_uid, qos=2)
# Prepare the connection
self.__mqtt.connect(self._host, self._port)
@Invalidate
def _invalidate(self, context):
"""
Component invalidated
"""
# Send the "lost" message
mid = self.__send_message(EVENT_LOST, self._framework_uid, True)
self.__mqtt.wait_publication(mid, 10)
# Disconnect from the server (this stops the loop)
self.__mqtt.disconnect()
# Clean up
self._framework_uid = None
self.__mqtt = None
def _make_topic(self, event):
"""
        Prepares an MQTT topic name for the given event
:param event: An event type (add, update, remove)
:return: A MQTT topic
"""
return "{0}/{1}".format(self._real_prefix, event)
def __on_connect(self, client, rc):
"""
Client connected to the server
"""
if not rc:
# Connection is OK, subscribe to the topic
client.subscribe(self._make_topic("#"))
# Provide the service
self._controller = True
# Send a discovery packet
self.__send_message(EVENT_DISCOVER, self._framework_uid)
def __on_disconnect(self, client, rc):
"""
Client has been disconnected from the server
"""
# Disconnected: stop providing the service
self._controller = False
def __on_message(self, client, msg):
"""
A message has been received from a server
:param client: Client that received the message
:param msg: A MQTTMessage bean
"""
# Get the topic
topic = msg.topic
# Extract the event
event = topic.rsplit("/", 1)[1]
try:
if event in ENDPOINT_EVENTS:
# Parse the endpoints (from EDEF XML to ImportEndpoint)
endpoints_descr = EDEFReader().parse(msg.payload)
endpoints = [endpoint.to_import()
for endpoint in endpoints_descr]
if not endpoints or \
endpoints[0].framework == self._framework_uid:
                    # No endpoints to read, or loopback message
return
# Give the list of endpoints to the handler
parameter = endpoints
else:
# Give the payload as is to other event handlers
parameter = msg.payload
try:
getattr(self, "_handle_{0}".format(event))(parameter)
except AttributeError:
_logger.error("Unhandled MQTT event: %s", event)
except Exception as ex:
_logger.exception("Error handling an MQTT message '%s': %s",
topic, ex)
def __send_message(self, event, payload, wait=False):
"""
Sends a message through the MQTT connection
:param event: Remote service event name
:param payload: Message content
:return: The local message ID
"""
# Publish the MQTT message (QoS 2 - Exactly Once)
return self.__mqtt.publish(self._make_topic(event), payload, qos=2,
wait=wait)
def _handle_add(self, endpoints):
"""
A set of endpoints have been registered
:param endpoints: Parsed ImportEndpoint beans
"""
# Notify the import registry
for endpoint in endpoints:
self._registry.add(endpoint)
def _handle_update(self, endpoints):
"""
A set of endpoints have been updated
:param endpoints: Parsed ImportEndpoint beans
"""
# Notify the import registry
for endpoint in endpoints:
self._registry.update(endpoint.uid, endpoint.properties)
def _handle_remove(self, endpoints):
"""
A set of endpoints has been removed
:param endpoints: Parsed ImportEndpoint beans
"""
# Notify the import registry
for endpoint in endpoints:
self._registry.remove(endpoint.uid)
def _handle_discover(self, payload):
"""
A framework wants to discover all services
:param payload: The UID of the sender
"""
if payload == self._framework_uid:
# We are the sender, ignore this message
return
# Get the list of our exported services
endpoints = self._dispatcher.get_endpoints()
if not endpoints:
# Nothing to say
return
# Convert the beans to XML (EDEF format)
xml_string = EDEFWriter().to_string(
beans.EndpointDescription.from_export(endpoint)
for endpoint in endpoints)
# Send the message
self.__send_message(EVENT_ADD, xml_string)
def _handle_lost(self, payload):
"""
A framework has been lost
:param payload: The UID of the lost framework
"""
self._registry.lost_framework(payload)
def endpoints_added(self, endpoints):
"""
Multiple endpoints have been added
:param endpoints: A list of ExportEndpoint beans
"""
# Convert the beans to XML (EDEF format)
xml_string = EDEFWriter().to_string(
beans.EndpointDescription.from_export(endpoint)
for endpoint in endpoints)
# Send the message
self.__send_message(EVENT_ADD, xml_string)
def endpoint_updated(self, endpoint, old_properties):
"""
        An endpoint is updated
:param endpoint: The updated endpoint
:param old_properties: Previous properties of the endpoint
"""
# Convert the endpoint into an EndpointDescription bean
endpoint_desc = beans.EndpointDescription.from_export(endpoint)
# Convert the bean to XML (EDEF format)
xml_string = EDEFWriter().to_string([endpoint_desc])
# Send the message
self.__send_message(EVENT_UPDATE, xml_string)
def endpoint_removed(self, endpoint):
"""
        An endpoint is removed
:param endpoint: Endpoint being removed
"""
# Convert the endpoint into an EndpointDescription bean
endpoint_desc = beans.EndpointDescription.from_export(endpoint)
# Convert the bean to XML (EDEF format)
xml_string = EDEFWriter().to_string([endpoint_desc])
# Send the message
self.__send_message(EVENT_REMOVE, xml_string)
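# A small illustrative sketch (not part of Pelix) of how the discovery topics
# used above are laid out: the configured prefix is formatted with the
# application id, double slashes are collapsed, and the event name is appended.
# The "demo-app" id below is only an example value.
def _topic_layout_sketch(prefix="pelix/{appid}/remote-services", appid="demo-app"):
    real_prefix = prefix.format(appid=appid or "").replace("//", "/")
    return {event: "{0}/{1}".format(real_prefix, event)
            for event in ENDPOINT_EVENTS + (EVENT_LOST, EVENT_DISCOVER)}
# Example: _topic_layout_sketch()["add"] == "pelix/demo-app/remote-services/add"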
|
|
""" Class definition for a BleedOut."""
import numpy as np
from collections.abc import Iterable
import openmdao.api as om
from pycycle.thermo.cea import species_data
from pycycle.thermo.thermo import Thermo
from pycycle.flow_in import FlowIn
from pycycle.passthrough import PassThrough
from pycycle.element_base import Element
class BleedCalcs(om.ExplicitComponent):
def initialize(self):
self.options.declare('bleed_names', types=Iterable, desc='list of names for the bleed ports')
def setup(self):
self.add_input('W_in', val=30.0, units='lbm/s', desc='entrance mass flow')
self.add_output('W_out', shape=1, units='lbm/s', desc='exit mass flow', res_ref=1e2)
# bleed inputs and outputs
for BN in self.options['bleed_names']:
self.add_input(BN+':frac_W', val=0.0, desc='bleed mass flow fraction (W_bld/W_in)')
self.add_output(BN+':stat:W', shape=1, units='lbm/s', desc='bleed mass flow', res_ref=1e2)
self.declare_partials(BN+':stat:W', ['W_in', BN+':frac_W'])
self.declare_partials('W_out', ['W_in', '*:frac_W'])
def compute(self, inputs, outputs):
# calculate flow and power without bleed flows
outputs['W_out'] = inputs['W_in']
# calculate bleed specific outputs and modify exit flow and power
for BN in self.options['bleed_names']:
outputs[BN+':stat:W'] = inputs['W_in'] * inputs[BN+':frac_W']
outputs['W_out'] -= outputs[BN+':stat:W']
def compute_partials(self, inputs, J):
# Jacobian elements without bleed flows
J['W_out','W_in'] = 1.0
for BN in self.options['bleed_names']:
J['W_out','W_in'] -= inputs[BN+':frac_W']
J['W_out',BN+':frac_W'] = -inputs['W_in']
J[BN+':stat:W','W_in'] = inputs[BN+':frac_W']
J[BN+':stat:W',BN+':frac_W'] = inputs['W_in']
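# A minimal standalone sketch (not part of pyCycle) of the mass-flow bookkeeping
# BleedCalcs implements above: each bleed port takes frac_W of the inlet flow and
# the exit flow is whatever remains. The numbers in the example are illustrative.
def _bleed_split_sketch(w_in, fracs):
    """Return (w_out, bleed_flows) for an inlet flow and a dict of bleed fractions."""
    bleed_flows = {name: w_in * frac for name, frac in fracs.items()}
    w_out = w_in - sum(bleed_flows.values())
    return w_out, bleed_flows
# Example: _bleed_split_sketch(30.0, {'test1': 0.05, 'test2': 0.05})
# -> (27.0, {'test1': 1.5, 'test2': 1.5})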
class BleedOut(Element):
"""
    bleed extraction from the incoming flow
--------------
Flow Stations
--------------
Fl_I -> primary input flow
Fl_O -> primary output flow
Fl_{bleed_name} -> bleed output flows
one for each name in `bleed_names` option
-------------
Design
-------------
inputs
--------
{bleed_name}:frac_W
            fraction of incoming flow to bleed off to Fl_{bleed_name}
MN
-------------
Off-Design
-------------
inputs
--------
area
"""
def initialize(self):
self.options.declare('statics', default=True,
desc='If True, calculate static properties.')
self.options.declare('bleed_names', types=(list,tuple), desc='list of names for the bleed ports',
default=[])
self.default_des_od_conns = [
# (design src, off-design target)
('Fl_O:stat:area', 'area')
]
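        # Descriptive note: at the design point the exit static state is solved
        # from the requested MN, and the resulting Fl_O:stat:area is handed to
        # off-design points via this mapping, where the area is then held fixed
        # and MN becomes an output instead.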
super().initialize()
def pyc_setup_output_ports(self):
self.copy_flow('Fl_I', 'Fl_O')
for b_name in self.options['bleed_names']:
self.copy_flow('Fl_I', b_name)
def setup(self):
thermo_method = self.options['thermo_method']
thermo_data = self.options['thermo_data']
statics = self.options['statics']
design = self.options['design']
bleeds = self.options['bleed_names']
composition = self.Fl_I_data['Fl_I']
# Create inlet flowstation
flow_in = FlowIn(fl_name='Fl_I')
self.add_subsystem('flow_in', flow_in, promotes=['Fl_I:tot:*', 'Fl_I:stat:*'])
# Bleed flow calculations
blds = BleedCalcs(bleed_names=bleeds)
bld_port_globs = [f'{bn}:*' for bn in bleeds]
self.add_subsystem('bld_calcs', blds,
promotes_inputs=[('W_in', 'Fl_I:stat:W'), '*:frac_W'],
promotes_outputs=['W_out']+bld_port_globs)
bleed_names = []
for BN in bleeds:
bleed_names.append(BN+'_flow')
bleed_flow = Thermo(mode='total_TP', fl_name=BN+":tot",
method=thermo_method,
thermo_kwargs={'composition':composition,
'spec':thermo_data})
self.add_subsystem(BN+'_flow', bleed_flow,
promotes_inputs=[('composition', 'Fl_I:tot:composition'),('T','Fl_I:tot:T'),('P','Fl_I:tot:P')],
promotes_outputs=['{}:tot:*'.format(BN)])
# Total Calc
real_flow = Thermo(mode='total_TP', fl_name="Fl_O:tot",
method=thermo_method,
thermo_kwargs={'composition':composition,
'spec':thermo_data})
prom_in = [('composition', 'Fl_I:tot:composition'),('T','Fl_I:tot:T'),('P','Fl_I:tot:P')]
self.add_subsystem('real_flow', real_flow, promotes_inputs=prom_in,
promotes_outputs=['Fl_O:*'])
if statics:
if design:
# Calculate static properties
out_stat = Thermo(mode='static_MN', fl_name="Fl_O:stat",
method=thermo_method,
thermo_kwargs={'composition':composition,
'spec':thermo_data})
prom_in = [('composition', 'Fl_I:tot:composition'),
'MN']
prom_out = ['Fl_O:stat:*']
self.add_subsystem('out_stat', out_stat, promotes_inputs=prom_in,
promotes_outputs=prom_out)
self.connect('Fl_O:tot:S', 'out_stat.S')
self.connect('Fl_O:tot:h', 'out_stat.ht')
self.connect('Fl_O:tot:P', 'out_stat.guess:Pt')
self.connect('Fl_O:tot:gamma', 'out_stat.guess:gamt')
self.connect('W_out', 'out_stat.W')
else:
# Calculate static properties
out_stat = Thermo(mode='static_A', fl_name="Fl_O:stat",
method=thermo_method,
thermo_kwargs={'composition':composition,
'spec':thermo_data})
prom_in = [('composition', 'Fl_I:tot:composition'),
'area']
prom_out = ['Fl_O:stat:*']
self.add_subsystem('out_stat', out_stat, promotes_inputs=prom_in,
promotes_outputs=prom_out)
self.connect('Fl_O:tot:S', 'out_stat.S')
self.connect('Fl_O:tot:h', 'out_stat.ht')
self.connect('Fl_O:tot:P', 'out_stat.guess:Pt')
self.connect('Fl_O:tot:gamma', 'out_stat.guess:gamt')
self.connect('W_out', 'out_stat.W')
else:
self.add_subsystem('W_passthru', PassThrough('W_out', 'Fl_O:stat:W', 1.0, units= "lbm/s"),
promotes=['*'])
super().setup()
if __name__ == "__main__":
p = om.Problem()
des_vars = p.model.add_subsystem('des_vars', om.IndepVarComp(), promotes=['*'])
des_vars.add_output('Fl_I:stat:W', 60.0, units='lbm/s')
des_vars.add_output('test1:frac_W', 0.05, units=None)
des_vars.add_output('test2:frac_W', 0.05, units=None)
des_vars.add_output('Fl_I:tot:T', 518.67, units='degR')
des_vars.add_output('Fl_I:tot:P', 14.696, units='psi')
des_vars.add_output('MN', 0.25)
p.model.add_subsystem('bleed', BleedOut(design=True, statics=True, bleed_names=['test1','test2']), promotes=['*'])
p.setup(check=False)
p.run_model()
print('W',p['Fl_I:stat:W'],p['Fl_O:stat:W'],p['test1:stat:W'],p['test2:stat:W'])
print('T',p['Fl_I:tot:T'],p['Fl_O:tot:T'],p['test1:tot:T'],p['test2:tot:T'])
print('P',p['Fl_I:tot:P'],p['Fl_O:tot:P'],p['test1:tot:P'],p['test2:tot:P'])
# p.check_partials()
|
|
# -*- coding: utf-8 -*-
#
# Warthog - Simple client for A10 load balancers
#
# Copyright 2014-2016 Smarter Travel
#
# Available under the MIT license. See LICENSE for details.
#
"""
warthog.cli
~~~~~~~~~~~
CLI interface for interacting with a load balancer using the Warthog client.
"""
import functools
import os
import os.path
import click
import requests
import warthog
import warthog.api
from .packages import six
def error_wrapper(func):
"""Decorator that coverts possible errors raised by the WarthogClient
into instances of ClickExceptions so that they may be rendered automatically
"""
# pylint: disable=missing-docstring
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except warthog.api.WarthogNoSuchNodeError as e:
raise click.BadParameter("{0} doesn't appear to be a known node".format(e.server))
except warthog.api.WarthogAuthFailureError as e:
raise click.ClickException(
"Authentication with the load balancer failed. The error was: {0}".format(e))
except requests.ConnectionError as e:
raise click.ClickException(
"Connecting to the load balancer failed. The error was {0}".format(e))
return wrapper
class WarthogClientFacade(object):
"""Wrapper around a :class:`warthog.client.WarthogClient` that coverts
exceptions encountered into exceptions that click will handle automatically.
"""
def __init__(self, client):
self._client = client
# pylint: disable=missing-docstring
@error_wrapper
def get_status(self, *args, **kwargs):
return self._client.get_status(*args, **kwargs)
# pylint: disable=missing-docstring
@error_wrapper
def get_connections(self, *args, **kwargs):
return self._client.get_connections(*args, **kwargs)
# pylint: disable=missing-docstring
@error_wrapper
def disable_server(self, *args, **kwargs):
return self._client.disable_server(*args, **kwargs)
# pylint: disable=missing-docstring
@error_wrapper
def enable_server(self, *args, **kwargs):
return self._client.enable_server(*args, **kwargs)
@click.group()
@click.version_option(version=warthog.__version__)
@click.option(
'--config',
help='Path to a configuration file to use for the load balancer API.',
type=click.Path(dir_okay=False))
@click.option(
'--enable-platform-warning',
help=('Enable warnings from underlying libraries when running on older Python '
'versions known to cause intermittent failures of SSL/TLS connections.'),
is_flag=True)
# pylint: disable=unused-argument
def main(config, enable_platform_warning):
"""Interact with a load balancer using the Warthog client."""
# We don't actually do anything with the config file argument at this point.
# The idea here is that we shouldn't be parsing the config file until we really
# need it (like when we're creating a client instance). This allows us to display
# help for subcommands without requiring the user to set up a config file first
# (which would be really annoying).
# Unless the user has specifically asked for this warning, we disable it because
# it makes the CLI unusable on Python 2.6 or Python 2.7 < 2.7.9 (which is what CentOS
# runs ATM).
if not enable_platform_warning:
disable_platform_warning()
def get_client(config):
"""Construct a new wrapped client based on the specified config file."""
# Passing the config file unconditionally here since if the user hasn't
# specified one it'll be None and the config loader will use the default
# locations.
loader = warthog.api.WarthogConfigLoader(config_file=config)
try:
# Expected errors that might be raised during parsing. These will
# already have nice user-facing messages so we just reraise them as
# BadParameter exceptions with the same message.
loader.initialize()
except warthog.api.WarthogConfigError as e:
raise click.ClickException(six.text_type(e))
settings = loader.get_settings()
# Wrap the client in a facade that translates expected errors into
# exceptions that click will render as error messages for the user.
return WarthogClientFacade(warthog.api.WarthogClient(
settings.scheme_host,
settings.username,
settings.password,
ssl_version=settings.ssl_version,
verify=settings.verify))
def disable_platform_warning():
"""Disable the SSL warnings emitted by urllib3. This is the default
behavior unless the caller specifically asks for these warnings.
See https://github.com/smarter-travel-media/warthog/issues/5
"""
import warnings
from urllib3.exceptions import InsecurePlatformWarning, SNIMissingWarning
warnings.filterwarnings("ignore", category=InsecurePlatformWarning)
warnings.filterwarnings("ignore", category=SNIMissingWarning)
@click.command()
@click.argument('server')
@click.pass_context
def enable(ctx, server):
"""Enable a server by hostname."""
client = get_client(ctx.parent.params['config'])
if not client.enable_server(server):
click.echo('{0} could not be enabled'.format(server))
ctx.exit(1)
@click.command()
@click.argument('server')
@click.pass_context
def disable(ctx, server):
"""Disable a server by hostname."""
client = get_client(ctx.parent.params['config'])
if not client.disable_server(server):
click.echo('{0} could not be disabled'.format(server))
ctx.exit(1)
@click.command()
@click.argument('server')
@click.pass_context
def status(ctx, server):
"""Get the status of a server by hostname."""
client = get_client(ctx.parent.params['config'])
click.echo(client.get_status(server))
@click.command()
@click.argument('server')
@click.pass_context
def connections(ctx, server):
"""Get active connections to a server by hostname."""
client = get_client(ctx.parent.params['config'])
click.echo(client.get_connections(server))
@click.command('default-config')
def default_config():
"""Print a default configuration file."""
click.echo(os.linesep.join([
'[warthog]',
'scheme_host = https://lb.example.com',
'username = username',
'password = password',
'verify = yes',
'ssl_version = TLSv1_2'
]))
@click.command('config-path')
def config_path():
"""Print the config file search PATH."""
click.echo(os.linesep.join(warthog.api.DEFAULT_CONFIG_LOCATIONS))
main.add_command(enable)
main.add_command(disable)
main.add_command(status)
main.add_command(connections)
main.add_command(default_config)
main.add_command(config_path)
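# A hedged usage sketch (not part of the Warthog package): the click commands
# defined above can be exercised in-process with click's test runner. The
# `default-config` subcommand needs no configuration file, so it is safe to
# invoke standalone.
if __name__ == '__main__':
    from click.testing import CliRunner
    _runner = CliRunner()
    _result = _runner.invoke(default_config, [])
    print(_result.output)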
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import numpy as np
from pymatgen.util.coord import Simplex
from functools import cmp_to_key
from scipy.spatial import HalfspaceIntersection, ConvexHull
from pymatgen.analysis.pourbaix.entry import MultiEntry
from six.moves import zip
import warnings
"""
Class for analyzing Pourbaix Diagrams. Similar to PDAnalyzer
"""
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "Sai Jayaraman"
__credits__ = "Arunima Singh, Joseph Montoya"
__email__ = "sjayaram@mit.edu"
__status__ = "Development"
__date__ = "Nov 7, 2012"
class PourbaixAnalyzer(object):
"""
Class for performing analysis on Pourbaix Diagrams
Args:
pd: Pourbaix Diagram to analyze.
"""
numerical_tol = 1e-8
def __init__(self, pd):
self._pd = pd
self._keys = ['H+', 'V', '1']
self.chempot_limits = None
def get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
entrylist = [self._pd.qhull_entries[i] for i in facet]
energylist = [self._pd.qhull_entries[i].g0 for i in facet]
m = self._make_comp_matrix(entrylist)
chempots = np.dot(np.linalg.inv(m), energylist)
return dict(zip(self._keys, chempots))
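    # Descriptive note: the facet chemical potentials above come from solving
    # the linear system m @ x = g0 for the facet's entries, where each row of m
    # is [npH, nPhi, 1] (see _make_comp_matrix below) and x corresponds to the
    # keys ('H+', 'V', '1').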
def _make_comp_matrix(self, entrylist):
"""
        Helper function to generate a normalized composition matrix from a
list of Pourbaix Entries
"""
return np.array([[entry.npH, entry.nPhi, 1] for entry in entrylist])
def get_chempot_range_map(self, limits=[[-2,16], [-4,4]]):
"""
Returns a chemical potential range map for each stable entry.
This function works by using scipy's HalfspaceIntersection
function to construct all of the 2-D polygons that form the
boundaries of the planes corresponding to individual entry
gibbs free energies as a function of pH and V. Hyperplanes
of the form a*pH + b*V + 1 - g(0, 0) are constructed and
supplied to HalfspaceIntersection, which then finds the
boundaries of each pourbaix region using the intersection
points.
Args:
limits ([[float]]): limits in which to do the pourbaix
analysis
Returns:
Returns a dict of the form {entry: [boundary_points]}.
The list of boundary points are the sides of the N-1
dim polytope bounding the allowable ph-V range of each entry.
"""
tol = PourbaixAnalyzer.numerical_tol
all_chempots = []
facets = self._pd.facets
for facet in facets:
chempots = self.get_facet_chempots(facet)
chempots["H+"] /= -0.0591
chempots["V"] = -chempots["V"]
chempots["1"] = chempots["1"]
all_chempots.append([chempots[el] for el in self._keys])
# Get hyperplanes corresponding to G as function of pH and V
halfspaces = []
qhull_data = np.array(self._pd._qhull_data)
stable_entries = self._pd.stable_entries
stable_indices = [self._pd.qhull_entries.index(e)
for e in stable_entries]
qhull_data = np.array(self._pd._qhull_data)
hyperplanes = np.vstack([-0.0591 * qhull_data[:, 0], -qhull_data[:, 1],
np.ones(len(qhull_data)), -qhull_data[:, 2]])
hyperplanes = np.transpose(hyperplanes)
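        # Descriptive note: g_max below is a value guaranteed to lie under every
        # energy plane inside the pH/V window; it is used both to close the
        # halfspace region from below (the 2 * g_max border hyperplane) and as
        # the G coordinate of the interior point given to HalfspaceIntersection.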
max_contribs = np.max(np.abs(hyperplanes), axis=0)
g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])
# Add border hyperplanes and generate HalfspaceIntersection
border_hyperplanes = [[-1, 0, 0, limits[0][0]],
[1, 0, 0, -limits[0][1]],
[0, -1, 0, limits[1][0]],
[0, 1, 0, -limits[1][1]],
[0, 0, -1, 2 * g_max]]
hs_hyperplanes = np.vstack([hyperplanes[stable_indices],
border_hyperplanes])
interior_point = np.average(limits, axis=1).tolist() + [g_max]
hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
# organize the boundary points by entry
pourbaix_domains = {entry: [] for entry in stable_entries}
for intersection, facet in zip(hs_int.intersections,
hs_int.dual_facets):
for v in facet:
if v < len(stable_entries):
pourbaix_domains[stable_entries[v]].append(intersection)
# Remove entries with no pourbaix region
pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
pourbaix_domain_vertices = {}
for entry, points in pourbaix_domains.items():
points = np.array(points)[:, :2]
# Initial sort to ensure consistency
points = points[np.lexsort(np.transpose(points))]
center = np.average(points, axis=0)
points_centered = points - center
# Sort points by cross product of centered points,
# isn't strictly necessary but useful for plotting tools
point_comparator = lambda x, y: x[0]*y[1] - x[1]*y[0]
points_centered = sorted(points_centered,
key=cmp_to_key(point_comparator))
points = points_centered + center
# Create simplices corresponding to pourbaix boundary
simplices = [Simplex(points[indices])
for indices in ConvexHull(points).simplices]
pourbaix_domains[entry] = simplices
pourbaix_domain_vertices[entry] = points
self.pourbaix_domains = pourbaix_domains
self.pourbaix_domain_vertices = pourbaix_domain_vertices
return pourbaix_domains
def _in_facet(self, facet, entry):
"""
Checks if a Pourbaix Entry is in a facet.
Args:
facet: facet to test.
entry: Pourbaix Entry to test.
"""
dim = len(self._keys)
if dim > 1:
coords = [np.array(self._pd.qhull_data[facet[i]][0:dim - 1])
for i in range(len(facet))]
simplex = Simplex(coords)
comp_point = [entry.npH, entry.nPhi]
return simplex.in_simplex(comp_point,
PourbaixAnalyzer.numerical_tol)
else:
return True
def _get_facets(self, entry):
"""
Get the facets that an entry falls into.
"""
memberfacets = list()
for facet in self._pd.facets:
if self._in_facet(facet, entry):
memberfacets.append(facet)
return memberfacets
def _get_facet(self, entry):
"""
Get any facet that a Pourbaix Entry falls into.
"""
for facet in self._pd.facets:
if self._in_facet(facet, entry):
return facet
raise RuntimeError("No facet found for comp = {}".format(entry.name))
def _get_all_facets(self, entry):
"""
Get all the facets that a Pourbaix Entry falls into
"""
        all_facets = []
        for facet in self._pd.facets:
            if self._in_facet(facet, entry):
                all_facets.append(facet)
        if not all_facets:
            raise RuntimeError("No facet found for comp = {}".format(entry.name))
        return all_facets
def _get_facet_entries(self, facet):
"""
Get the entries corresponding to a facet
"""
entries = []
for vertex in facet:
entries.append(self._pd.qhull_entries[vertex])
return entries
def g(self, entry, pH, V):
"""
Get free energy for a given pH, and V.
"""
g0 = entry.g0
npH = -entry.npH * 0.0591
nPhi = -entry.nPhi
return g0 - npH * pH - nPhi * V
def get_all_decomp_and_e_above_hull(self, single_entry):
"""
Computes the decomposition entries, species and hull energies
for all the multi-entries which have the "material" as the only solid.
Args:
single_entry: single entry for which to find all of the
decompositions
Returns:
(decomp_entries, hull_energies, decomp_species, entries)
for all multi_entries containing the single_entry as the
only solid
"""
decomp_entries, hull_energies, decomp_species, entries = [], [], [], []
# for all entries where the material is the only solid
if not self._pd._multielement:
possible_entries = [e for e in self._pd.all_entries
if single_entry == e]
else:
possible_entries = [e for e in self._pd.all_entries
if e.phases.count("Solid") == 1
and single_entry in e.entrylist]
for possible_entry in possible_entries:
# Find the decomposition details if the material
# is in the Pourbaix Multi Entry or Pourbaix Entry
facets = self._get_all_facets(possible_entry)
for facet in facets:
entrylist = [self._pd.qhull_entries[i] for i in facet]
m = self._make_comp_matrix(entrylist)
compm = self._make_comp_matrix([possible_entry])
decomp_amts = np.dot(np.linalg.inv(m.transpose()), compm.transpose())
decomp, decomp_names = {}, {}
for i, decomp_amt in enumerate(decomp_amts):
if abs(decomp_amt[0]) > PourbaixAnalyzer.numerical_tol:
decomp[self._pd.qhull_entries[facet[i]]] = decomp_amt[0]
decomp_entries.append(decomp)
hull_energy = sum([entry.g0 * amt for entry, amt in decomp.items()])
hull_energies.append(possible_entry.g0 - hull_energy)
entries.append(possible_entry)
return decomp_entries, hull_energies, entries
def get_decomposition(self, entry):
"""
Provides the decomposition at a particular composition
Args:
comp: A composition
Returns:
Decomposition as a dict of {PourbaixEntry: amount}
"""
facet = self._get_facet(entry)
entrylist = [self._pd.qhull_entries[i] for i in facet]
m = self._make_comp_matrix(entrylist)
compm = self._make_comp_matrix([entry])
decomp_amts = np.dot(np.linalg.inv(m.transpose()), compm.transpose())
decomp = dict()
self.decomp_names = dict()
#Scrub away zero amounts
for i in range(len(decomp_amts)):
if abs(decomp_amts[i][0]) > PourbaixAnalyzer.numerical_tol:
decomp[self._pd.qhull_entries[facet[i]]] = decomp_amts[i][0]
self.decomp_names[self._pd.qhull_entries[facet[i]].name] = decomp_amts[i][0]
return decomp
def get_decomp_and_e_above_hull(self, entry):
"""
Provides the decomposition and energy above convex hull for an entry
Args:
entry: A PourbaixEntry
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0.
"""
g0 = entry.g0
decomp = self.get_decomposition(entry)
hull_energy = sum([entry.g0 * amt
for entry, amt in decomp.items()])
return decomp, g0 - hull_energy, self.decomp_names
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PourbaixEntry object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
# TODO: we might want to rename this, still a bit ambiguous
def get_gibbs_free_energy(self, pH, V):
"""
Provides the gibbs free energy of the Pourbaix stable entry
at a given pH and V
Args:
pH: pH
V: potential vs SHE
Returns:
gibbs free energy (eV/atom)
"""
data = {}
for entry in self._pd.stable_entries:
data.update({entry.name: self.g(entry, pH, V)})
gibbs_energy = min(data.values())
stable_entry = [k for k, v in data.items() if v == gibbs_energy]
return (gibbs_energy, stable_entry)
def _min_multientry_from_single_entry(self, single_entry):
"""
Gives lowest energy multi-entry from single entry
Args:
single_entry (PourbaixEntry): pourbaix entry to find valid
multientries from
"""
de, ehulls, entries = self.get_all_decomp_and_e_above_hull(single_entry)
if not ehulls:
raise ValueError("No entries where {} is the only solid".format(
single_entry.name))
return entries[np.argmin(ehulls)]
def get_entry_stability(self, entry, pH, V):
"""
Get the energy difference between an entry and the
most stable decomposition product (i.e. the pourbaix-stable
entry) at a given pH and voltage.
Args:
entry (PourbaixEntry): Pourbaix entry or MultiEntry
corresponding to the stability to be calculated
pH (float): pH at which to calculate stability of entry
V (float): voltage at which to calculate stability of entry
"""
if self._pd._multielement and not isinstance(entry, MultiEntry):
_, _, entries = self.get_all_decomp_and_e_above_hull(entry)
warnings.warn("{} is not a multi-entry, calculating stability of "
"representative {} multientry")
gs = [self.g(e, pH, V) for e in entries]
return min(gs) - self.get_gibbs_free_energy(pH, V)[0]
return self.g(entry, pH, V) - self.get_gibbs_free_energy(pH, V)[0]
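# An illustrative standalone sketch (not pymatgen API), mirroring the sign
# conventions of PourbaixAnalyzer.g above: each entry contributes a plane in
# (pH, V), and the Pourbaix-stable entry at a point is the one with the lowest
# plane value. The example entries are made up.
def _lowest_plane_sketch(entries, pH, V):
    """entries: dict of name -> (g0, npH, nPhi); returns (name, energy)."""
    energies = {name: g0 + 0.0591 * npH * pH + nPhi * V
                for name, (g0, npH, nPhi) in entries.items()}
    name = min(energies, key=energies.get)
    return name, energies[name]
# Example: _lowest_plane_sketch({'A': (-1.0, 2, 1), 'B': (-0.5, 0, 0)}, pH=7.0, V=0.0)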
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Network is a composition of Layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import weakref
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import ops
from tensorflow.python.layers import base
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
# pylint: disable=protected-access
# Explanation for protected-access disable: Network has lots of same-class and
# parent-class references across different objects, and some to private
# functions in base.py which should be reused.
_DeferredRestoration = collections.namedtuple(
"_DeferredRestoration",
[
# The map_func to use (either user-specified or the default).
"map_func",
# Boolean, True if the user specified an explicit map_func, for error
# messages.
"map_func_is_user",
# A mapping from checkpoint names to initial values of not-yet-created
# variables which should be restored. These values come from parsing a
# checkpoint.
"checkpointed_variables_to_restore",
# A mapping from checkpoint name to variable objects of variables which
# have already been restored, for error checking.
"restored_variables",
# The session to restore with (if in graph mode).
"session",
# Names of the Network where the restore was requested, for error
# messages.
"network_name",
"network_scope_name"
])
def _default_naming_conflict_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The default checkpoint variable name mapping strategy for Network "
"'%s' resulted in a naming conflict. We attempted to strip off the "
"variable prefix for the Network ('%s'), but this resulted in two "
"variables named '%s' (originally '%s' and '%s'). This should only "
"happen when using variable sharing (i.e. the Network contains Networks "
"or Layers which were first added to another Network, and therefore "
"have that Network's variable prefix). One solution is to pass "
"`map_func=lambda n: n` to Network.save and Network.restore to use "
"fully qualified variable names in the checkpoint, although this will "
"require that the variable prefix of the Network being restored into "
"is also '%s'. You may alternatively write an arbitrary mapping.")
% (
network_name, network_scope_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name, network_scope_name
))
def _restore_custom_map_func_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The map_func passed to Network.restore for the Network '%s' "
"resulted in two variables named '%s' (originally '%s' and '%s'). Since "
"this is also an error on Network.save, this Network was "
"probably not saved with this map_func. Note that map_func "
"always maps from full variable names to checkpoint names; "
"there is no need to specify an inverse mapping.\n\n"
"Try stripping less from the variable names, or renaming parts "
"of the Network. For reference, variables created by sub-Layers "
"of this Network are prefixed with '%s', but if they are "
"re-used after being added to another Network they will have "
"that Network's full variable prefix instead.") % (
network_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name,
network_scope_name))
def _make_custom_getter_for_deferred_restorations():
"""Returns a custom getter which searches `deferred_restorations`.
Returns: A tuple of (_custom_getter, deferred_restorations)
_custom_getter: The getter which should be added to variable_scopes where
variables will be created.
deferred_restorations: A list for _DeferredRestoration objects. Typically
empty when the getter is set, and expanded as deferred restorations are
requested. All new deferred restorations should be appended to the end of
the list, where they will have priority over older deferred restorations.
"""
deferred_restorations = []
def _custom_getter(getter, name, shape=None, dtype=None,
initializer=None,
*args, **kwargs):
"""A custom getter which processes deferred restorations."""
# Iterate over restorations, newest first (newer restorations will take
# precedence over older restorations, just like with immediate restorations
# into existing variables).
delayed_restoration = None
found_value = False
value_to_restore = None
for delayed_restoration in reversed(
deferred_restorations):
checkpoint_name = delayed_restoration.map_func(name)
if (checkpoint_name
in delayed_restoration.checkpointed_variables_to_restore):
found_value = True
value_to_restore = (
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name])
if found_value:
break
# value_to_restore may be False because this variable is not in any
# checkpoint we are restoring, or None because we have explicitly set it to
# None when it was previously fetched. In either case, we don't need to
# set an initializer.
if found_value and value_to_restore is not None:
initializer = value_to_restore
shape = None
variable = getter(name, shape=shape, dtype=dtype, initializer=initializer,
*args, **kwargs)
if found_value and value_to_restore is not None:
# Mark as already restored from this checkpoint.
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name] = None
if context.in_graph_mode():
delayed_restoration.session.run(variable.initializer)
if found_value:
# Error checking should run even if we've already restored a value.
if delayed_restoration.restored_variables.setdefault(
checkpoint_name, variable) is not variable:
# Naming conflict. We've tried to initialize two variables with the
# same value from the checkpoint.
if delayed_restoration.map_func_is_user:
raise ValueError(
_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration.restored_variables[
checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
else:
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration.restored_variables[
checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
return variable
return _custom_getter, deferred_restorations
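# A tiny standalone illustration (not TensorFlow API) of the "newest request
# wins" rule used by the custom getter above: deferred restorations are
# appended to a list and searched newest-first when a variable is finally
# created, so later restore() calls take priority for the same variable.
def _newest_match_sketch(deferred_values, checkpoint_name):
  for values in reversed(deferred_values):  # newest request first
    if checkpoint_name in values:
      return values[checkpoint_name]
  return None  # variable not present in any pending checkpoint
# Example: _newest_match_sketch([{'w': 1.0}, {'w': 2.0}], 'w') returns 2.0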
class Network(base.Layer):
"""Represents the composition of a set of Layers.
TODO(josh11b,ashankar):
- Should "trainable" be changeable on the Network object?
- Do we allow add_variable in Network?
- Detect layers used in __call__ that weren't registered with track_layer.
- Convert inputs to __call__ to tensors.
- Prevent variables from being created after the first __call__?
(Think about restoring from a checkpoint).
"""
def __init__(self, name=None):
if isinstance(name, variable_scope.VariableScope):
raise ValueError("VariableScopes are not valid Network names.")
if name is not None and "/" in name:
raise ValueError(
"Forward slashes ('/') are not allowed in Network names.")
super(Network, self).__init__(name=name)
self._layers = []
self._sub_layer_name_uids = collections.defaultdict(int)
# Initially None, but set to False for networks which are first built as
# top-level.
self._first_parent = None # A weak reference to our first parent.
self._non_network_sublayers = []
self._owned_layers = {}
# The scope to use if we end up without a parent.
self._default_parent_variable_scope = variable_scope.get_variable_scope()
self._custom_getter, self._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
def _init_set_name(self, name):
# Anonymous Networks (name=None) defer setting a final name until they are
# (1) added to another Network, or (2) built/called (where (2) is only used
# for a "top level" network).
#
# However, if we were provided an explicit name (name is not None), that
# will always be the final name of the Network; if it turns out not to be
# unique or if variable names can't be prefixed by it we will throw an
# error.
self._name = name
self._base_name = None
def _finalize_name(self, parent_network):
if not self._name:
if not parent_network:
name_uid_map = base._get_default_graph_uid_map()
else:
name_uid_map = parent_network._sub_layer_name_uids
      # We were not passed a name explicitly (or it was blank), so this is an
# anonymous Network. We make up a unique name.
if parent_network:
avoid_names = parent_network._owned_layers
else:
avoid_names = None
self._name, self._base_name = self._make_unique_name(
name_uid_map=name_uid_map, avoid_names=avoid_names)
if self._first_parent is None or (self._first_parent # False = no parent
and self._first_parent() is None):
# Save a pointer to the parent Network so that we can later check that the
# scope name we get is correct.
if not parent_network:
self._first_parent = parent_network
else:
self._first_parent = weakref.ref(parent_network)
def _set_scope(self, scope=None):
if self._scope is None:
if not self._first_parent:
first_parent = self._first_parent
else:
first_parent = self._first_parent()
if first_parent is None:
        # If we were never added to another Network, or that Network has been
# garbage collected before being called, then we're a top-level Network.
self._finalize_name(
# Use False to make sure the value sticks and we don't inherit a
# parent if we're added to a network later.
parent_network=False)
if scope is not None:
raise ValueError("Networks may not be created with explicit scopes.")
if first_parent:
first_parent._set_scope()
parent_scope = first_parent._scope
else:
parent_scope = self._default_parent_variable_scope
with variable_scope.variable_scope(parent_scope):
# Make sure variables with this prefix will be unique.
with variable_scope.variable_scope(
None, use_resource=True, default_name=self._name) as scope:
self._scope = scope
scope_name = scope.name
suffix_start = scope_name.rfind("/") + 1
# rfind is -1 if there is no slash in the string, in which case the
# suffix starts at the beginning of the string (there is no prefix).
scope_suffix = scope_name[suffix_start:]
scope_prefix = scope_name[:suffix_start]
if scope_suffix != self._name:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") % (
self._name,))
if (first_parent
and scope_prefix[:-1] != first_parent._scope.name):
raise ValueError(
("Network variable names must match a nesting of sub-Network "
"names. Expected prefix '%s' from parent network, but got "
"'%s' when attempting to create a variable_scope for Network "
"'%s'. Likely an explicit variable_scope was inserted into "
"the nesting.") % (
first_parent._scope.name,
scope_prefix[:-1],
self._name))
elif not first_parent and scope_prefix:
# For the case when this Network is not nested inside any other
# Network, but is in a variable_scope. This is an error for now.
raise ValueError(
"Creating Networks inside named variable_scopes is currently "
"not supported (to ensure that variable names match the names "
"of Networks in which they were first created). To set "
"options, try `with tf.variable_scope(''):`. If this "
"limitation bothers you, please file a feature request.")
for non_network_sublayer in self._non_network_sublayers:
self._set_scope_for_nonnetwork_sublayer(non_network_sublayer)
def _set_scope_for_nonnetwork_sublayer(self, sublayer):
if sublayer._scope is None:
if sublayer._first_parent is None:
constituent_first_parent = None
else:
constituent_first_parent = sublayer._first_parent()
if constituent_first_parent:
constituent_first_parent._set_scope()
parent_scope = constituent_first_parent._scope
else:
self._finalize_name(False)
raise ValueError(
("The parent of a Layer added to Network %s was garbage collected "
"before the Layer was built. If this limitation bothers you "
"please, comment on "
"https://github.com/tensorflow/tensorflow/issues/14164.") %
(self.name,))
with variable_scope.variable_scope(parent_scope):
# Horrid hack to make Layer variable names which are direct
# sub-layers of Networks conform to the Network variable naming
# conventions.
with variable_scope.variable_scope(
None, use_resource=True,
default_name=sublayer.name) as sub_scope:
sublayer._scope = sub_scope
@base.Layer.name.getter
def name(self):
if self._name is None:
raise ValueError(
"The network does not yet have a final name, but a name was "
"requested for it. Networks get a name when they are added to "
"another Network via track_layer, or when they are first "
"called/built.")
return self._name
def track_layer(self, layer):
"""Track a Layer in this Network.
`Network` requires that all `Layer`s used in `call()` be tracked so that the
`Network` can export a complete list of variables.
Args:
layer: A `tf.layers.Layer` object.
Returns:
The passed in `layer`.
Raises:
RuntimeError: If __init__ has not been called.
TypeError: If `layer` is the wrong type.
ValueError: If a `Layer` with the same name has already been added.
"""
if not hasattr(self, "_layers"):
raise RuntimeError("Need to call Network.__init__ before adding layers")
if not isinstance(layer, base.Layer):
raise TypeError(
"Network.track_layer() passed type %s, not a tf.layers.Layer" %
(type(layer),))
if isinstance(layer, Network):
layer._finalize_name(parent_network=self)
else:
# `layer` is a non-Network, so it hasn't been named to follow Network
# conventions for contained Layers (i.e. the same conventions as for
# sub-Networks). This renaming is necessary to isolate Network variable
# naming from Layers constructed outside the Network and never added to it
# (because Layers are named globally).
if not layer.built:
if not hasattr(layer, "_first_parent"):
dereferenced_layer_first_parent = None
else:
dereferenced_layer_first_parent = layer._first_parent()
if dereferenced_layer_first_parent is None:
if layer._name != layer._base_name:
# If name and base_name do not match, then this Layer used anonymous
# naming and we have to rename it. Otherwise there's an explicit
# name, and we should respect it (subject to error checking).
layer._name, layer._base_name = layer._make_unique_name(
name_uid_map=self._sub_layer_name_uids,
avoid_names=self._owned_layers)
layer._first_parent = weakref.ref(self)
self._non_network_sublayers.append(layer)
if (not layer.built
and layer._first_parent
and self is layer._first_parent()):
if layer.name in self._owned_layers:
if self._owned_layers[layer.name] is layer:
return layer
raise ValueError(
"Attempt to add two Layers with the name '%s' to the same Network."
% (layer.name))
self._owned_layers[layer.name] = layer
self._layers.append(layer)
return layer
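  # Illustrative usage (not from this file): inside a Network subclass's
  # __init__, layers are registered with, e.g.,
  #   self.l1 = self.track_layer(tf.layers.Dense(16, activation=tf.nn.relu))
  # and then invoked from call() as self.l1(x); tracking is what lets the
  # Network enumerate variables for save() and restore().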
def get_layer(self, name=None, index=None):
"""Get a contained `tf.layers.Layer` either by name or index.
Args:
name: String matching one of the names of a contained `Layer`. Note that
the names of `Layer`s added to `Network`s may not be unique when doing
layer sharing (i.e. adding a `Layer` to this `Network` which was already
added to another `Network`). The lowest index `Layer` with a matching
name will be returned.
index: Integer in [0, number of layers). Layers are assigned an index
by the order they are added.
Returns:
A `tf.layers.Layer` object.
Raises:
ValueError: If neither or both of 'index' or 'name' is specified, or the
lookup failed.
"""
if index is not None:
if name is not None:
raise ValueError("Exactly one of 'index' or 'name' must be provided")
if len(self._layers) <= index:
raise ValueError("Was asked to retrieve layer at index " + str(index) +
" but model only has " + str(len(self._layers)) +
" layers.")
else:
return self._layers[index]
else:
if not name:
raise ValueError("Provide either a layer name or layer index.")
for layer in self._layers:
if layer.name == name:
return layer
raise ValueError("No such layer: " + name)
# The following methods are for implementing the Layer interface.
@property
def weights(self):
# TODO(josh11b): Should this return a set or perform de-duplication of
# variables in the case of shared layers/variables that appear in
# multiple places in the Network?
weights = []
for layer in self._layers:
weights += layer.weights
return weights
@property
def trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.non_trainable_weights
return weights
@property
def trainable(self):
return True
@trainable.setter
def trainable(self, value):
if not value:
# We believe it better to decide which layers & networks are trainable
# at the Trainer level than here. Otherwise you can run into trouble if a
# layer/network is shared between two models, but is trainable in one
# but not the other (like with adversarial networks).
raise AttributeError("cannot mark Network as not trainable")
@property
def layers(self):
return self._layers
def add_variable(self, name, shape, dtype=None, initializer=None,
regularizer=None, trainable=True, constraint=None):
raise RuntimeError(
"add_variable not supported in Network class yet. Please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new if this is "
"important to you")
def _strip_variable_prefix(self, original_variable_name):
"""The default map_func for saving or restoring variables.
Strips the variable prefix for the Network on which save/restore was called,
and leaves other variable names fully qualified in the checkpoint.
Args:
original_variable_name: The _shared_name of the variable (no :0
suffix) to map.
Returns:
The checkpoint name of the variable.
"""
scope_name_with_slash = self.scope_name + "/"
if original_variable_name.startswith(scope_name_with_slash):
return original_variable_name[len(scope_name_with_slash):]
else:
return original_variable_name
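  # Illustrative example (not executed): with scope_name 'my_network_1', the
  # default map_func above maps 'my_network_1/dense_1/kernel' to
  # 'dense_1/kernel', while a variable shared from another Network such as
  # 'other_network/dense/kernel' keeps its fully qualified name. Passing
  # `map_func=lambda n: n` to save()/restore() keeps every name fully qualified.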
def save(self, save_path, global_step=None, map_func=None):
"""Save variables from the Network to a checkpoint.
Args:
save_path: Either a checkpoint prefix or the name of a directory to save
the checkpoint in (in which case the checkpoint will be named based on
the Network name).
global_step: The global step to use when naming the checkpoint. If None
(default), we will first try to get the default global step. If that
fails because no default global step exists, then the checkpoint is
created without a global step suffix.
map_func: A function mapping fully qualified variable names
(e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
default (if `map_func=None`), the variable prefix for the network being
restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
and all other variable names (shared with other Networks) are left
unchanged.
Returns:
The checkpoint prefix for the saved checkpoint, which may be passed to
`Network.restore`.
Raises:
ValueError: If the Network has not yet been called, or if map_func results
in a name collision.
"""
if not self.built:
raise ValueError(
"Attempt to save the Network before it was first called. This means "
"variables have not yet been created, so there is nothing to save.")
self._set_scope() # scope_name should be available to map_funcs
if global_step is None:
global_step = training_util.get_global_step()
if os.path.isdir(save_path):
# If we were passed a directory, default to naming based on the Network
# name.
save_path = os.path.join(save_path, self.name)
user_map_func = map_func
if map_func is None:
map_func = self._strip_variable_prefix
variable_map = {}
for variable in self.variables:
mapped_name = map_func(variable._shared_name)
if variable_map.setdefault(mapped_name, variable) is not variable:
if user_map_func is None:
# Instead of erroring out, we could just re-try and silently use the
# full variable names in the checkpoint. This could be odd for deeply
# nested sub-Networks (since the full prefix from the nesting would
# get added), so for now we'll let the user deal with this case.
raise ValueError(_default_naming_conflict_error_message(
mapped_name=mapped_name,
first_variable=variable_map[mapped_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
else:
# The user passed their own problematic map_func.
raise ValueError(
("The map_func passed to Network.save for the Network '%s' "
"resulted in two variables named '%s' ('%s' and '%s'). Try "
"stripping less from the variable names, or renaming parts of "
"the Network. For reference, variables created by sub-Layers of "
"this Network are prefixed with '%s', but if they are re-used "
"after being added to another Network, they will have that "
"Network's full variable prefix instead.") % (
self.name, mapped_name,
variable_map[mapped_name]._shared_name,
variable._shared_name,
self.scope_name))
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
return saver_lib.Saver(variable_map).save(
sess=sess, save_path=save_path, write_meta_graph=False,
global_step=global_step)
def _restore_existing_variables(self, save_path, map_func, user_map_func):
"""Use a standard Saver to restore existing variables from a checkpoint.
Args:
save_path: The checkpoint prefix or directory to read from.
map_func: The function to use when mapping from variable names to
checkpoint names.
user_map_func: The original map_func passed by the user, for error
checking.
Returns:
A dictionary mapping from checkpoint names to variable objects which have
been restored (for bookkeeping to avoid deferred restorations on these
variables).
Raises:
ValueError: If there is a name collision.
"""
existing_variables_by_checkpoint_name = {}
for variable in self.variables:
checkpoint_name = map_func(variable._shared_name)
if existing_variables_by_checkpoint_name.setdefault(
checkpoint_name, variable) is not variable:
if user_map_func is None:
raise ValueError(_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
else:
raise ValueError(_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
if existing_variables_by_checkpoint_name:
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore(
sess=sess, save_path=save_path)
return existing_variables_by_checkpoint_name
def _set_restore_on_create(self, save_path, map_func, user_map_func,
existing_variables_by_checkpoint_name):
"""If necessary, request deferred restorations of variables."""
checkpoint_reader = checkpoint_utils.load_checkpoint(save_path)
checkpointed_variables_to_restore = {}
for checkpoint_name, _ in checkpoint_utils.list_variables(save_path):
if checkpoint_name in existing_variables_by_checkpoint_name:
# This variable was already created and restored.
continue
# Save the variable for later restoration in a custom getter.
checkpointed_variables_to_restore[checkpoint_name] = (
checkpoint_reader.get_tensor(checkpoint_name))
# Only set a deferred restoration if there are checkpoint variables which
# have not been assigned to existing variables. Note that this loses out on
# some opportunity for error checking, but avoids creating
# _DeferredRestoration objects once a Network has been built (so that
# restoring in a loop does not take increasing amounts of memory).
if checkpointed_variables_to_restore:
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
# We need a name for error messages. If we haven't been added to another
# Network yet, we're top-level.
self._finalize_name(False)
self._set_scope()
# Save a record of this restoration for use in the custom getter.
deferred_restoration = _DeferredRestoration(
map_func=map_func,
map_func_is_user=(user_map_func is not None),
checkpointed_variables_to_restore=checkpointed_variables_to_restore,
restored_variables={},
session=sess,
network_name=self.name,
network_scope_name=self.scope_name)
self._deferred_restorations.append(deferred_restoration)
# Add the deferred registration to non-Network children, and request that
# Networks propagate the request to their children.
self._add_deferred_restoration(deferred_restoration)
def _add_deferred_restoration(self, deferred_restoration):
"""Add a deferred restoration to this Network and all children.
Restorations which are requested later have higher priority, and the highest
priority matching restoration is applied to a variable when it is created.
Args:
deferred_restoration: A _DeferredRestoration object.
"""
# Networks don't create variables at the moment, so this append isn't
# strictly necessary. We could get by with only adding deferred restorations
# to non-Network Layers.
self._set_scope()
# We use set_custom_getter because it avoids recursively calling up the
# variable_scope tree. We've done the tree traversal ourselves and have
# added the request to each Layer which needs it.
self._scope.set_custom_getter(self._custom_getter)
self._deferred_restorations.append(deferred_restoration)
for layer in self.layers:
if isinstance(layer, Network):
# For Networks, request that they propagate this deferred restoration
# to all of their children recursively.
layer._add_deferred_restoration(deferred_restoration)
else:
# For non-Network Layers, make sure they have a deferred restoration
# queue and a custom getter, then add our request to it.
if not hasattr(layer, "_custom_getter"):
assert not hasattr(layer, "_deferred_restorations")
layer._custom_getter, layer._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
self._set_scope_for_nonnetwork_sublayer(layer)
layer._scope.set_custom_getter(layer._custom_getter)
layer._deferred_restorations.append(deferred_restoration)
def restore(self, save_path, map_func=None):
"""Restore the Network from a checkpoint.
If variables have already been created (typically when some or all of the
`Network` is built), they are assigned values from the checkpoint
immediately, overwriting any existing values (in graph mode the default
session is used for the assignments).
If there are checkpoint entries which do not correspond to any existing
variables in the `Network`, these values are saved for deferred restoration;
their initial values will be the checkpointed values once they are
created. Requests for multiple deferred restorations behave the same way as
immediate restorations, in that later requests will take priority over
earlier requests relevant to the same variable.
If this `Network` shares `Layer`s with another network, those `Layer`s will
also have their variables restored from the checkpoint.
Args:
save_path: The return value of `Network.save`, or a directory to search
for a checkpoint.
map_func: A function mapping fully qualified variable names
(e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
default (if `map_func=None`), the variable prefix for the network being
restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
and all other variable names (shared with other Networks) are left
unchanged. Note that this is the _same_ map_func as `Network.save`, not
an inverse mapping.
"""
self._finalize_name(parent_network=False)
self._set_scope() # scope_name should be available to map_funcs
if os.path.isdir(save_path):
# A directory was passed in; look for a checkpoint prefix named after this Network.
save_path = os.path.join(save_path, self.name)
user_map_func = map_func
if map_func is None:
map_func = self._strip_variable_prefix
# Step one is to restore any existing variables from the checkpoint.
existing_variables_by_checkpoint_name = self._restore_existing_variables(
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func)
# Step two is to set a custom getter which restores variables on creation,
# for those variables which have not been added to sub-Layers yet.
self._set_restore_on_create(
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func,
existing_variables_by_checkpoint_name=(
existing_variables_by_checkpoint_name))
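# Illustrative sketch (not part of the original code): a typical save/restore
# round trip with a Network subclass. The class name, input shape, and the
# checkpoint path below are hypothetical.
#
#   net = MyNetwork(name="my_network")        # some Network subclass
#   net(tf.zeros([1, 3]))                     # build, creating variables
#   ckpt_prefix = net.save("/tmp/net_ckpt")   # returns the checkpoint prefix
#
#   fresh = MyNetwork(name="my_network")
#   fresh.restore(ckpt_prefix)                # restores existing variables and
#                                             # defers the rest until creation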
# TODO(josh11b): Support other Layer methods needed for graph mode, such as for
# losses and updates
class Sequential(Network):
"""Represents a linear sequence of Layers or functions.
The output of each layer/function is provided as the input to the next.
The inputs passed to `__call__` are passed to the inputs of the first
Layer, and it returns the outputs of the last Layer.
Args:
layers_funcs: An optional sequence where each element is either a
tf.layers.Layer object or a callable.
name: An optional string name to use for this Network.
"""
def __init__(self, layers_funcs=None, name=None):
super(Sequential, self).__init__(name=name)
self._layers_funcs = []
if layers_funcs:
for l in layers_funcs:
self.add(l)
def add(self, layer_func):
if isinstance(layer_func, base.Layer):
args = estimator_util.fn_args(layer_func.call)
self.track_layer(layer_func)
elif callable(layer_func):
args = estimator_util.fn_args(layer_func)
else:
raise TypeError(
"Sequential.add() takes only tf.layers.Layer objects or callables; "
"not '%s' of type '%s'." % (layer_func, type(layer_func)))
self._layers_funcs.append((("training" in args), layer_func))
def call(self, inputs, training=None):
"""Call each Layer in the order they were added."""
# TODO(josh11b): Support "mode" and maybe other arguments
if training is None:
for _, l in self._layers_funcs:
inputs = l(inputs)
else:
for has_training_arg, l in self._layers_funcs:
if has_training_arg:
inputs = l(inputs, training)
else:
inputs = l(inputs)
return inputs
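# Illustrative sketch (not part of the original code): composing a small model
# with Sequential. Assumes eager execution is enabled and that tf.layers.Dense
# is available in this TensorFlow version; the layer sizes are arbitrary.
#
#   model = Sequential([
#       tf.layers.Dense(16, activation=tf.nn.relu),
#       tf.layers.Dense(1),
#       tf.nn.sigmoid,                        # plain callables are allowed too
#   ])
#   outputs = model(tf.constant([[1.0, 2.0, 3.0]]))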
|
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from iso8601 import iso8601
import mock
from oslo_versionedobjects import fields
from sqlalchemy import sql
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_objects
from cinder.tests.unit import objects as test_objects
class TestCinderObjectVersionHistory(test_objects.BaseObjectsTestCase):
def test_add(self):
history = test_objects.obj_base.CinderObjectVersionsHistory()
v10 = {'Backup': '2.0'}
v11 = {'Backup': '2.1'}
history.add('1.0', v10)
history.add('1.1', v11)
# We have 3 elements because we have the liberty version by default
self.assertEqual(2 + 1, len(history))
expected_v10 = history['liberty'].copy()
expected_v10.update(v10)
expected_v11 = history['liberty'].copy()
expected_v11.update(v11)
self.assertEqual('1.1', history.get_current())
self.assertEqual(expected_v11, history.get_current_versions())
self.assertEqual(expected_v10, history['1.0'])
def test_add_existing(self):
history = test_objects.obj_base.CinderObjectVersionsHistory()
history.add('1.0', {'Backup': '1.0'})
self.assertRaises(exception.ProgrammingError,
history.add, '1.0', {'Backup': '1.0'})
class TestCinderObject(test_objects.BaseObjectsTestCase):
"""Tests methods from CinderObject."""
def setUp(self):
super(TestCinderObject, self).setUp()
self.obj = fake_objects.ChildObject(
scheduled_at=None,
uuid=uuid.uuid4(),
text='text')
self.obj.obj_reset_changes()
def test_cinder_obj_get_changes_no_changes(self):
self.assertDictEqual({}, self.obj.cinder_obj_get_changes())
def test_cinder_obj_get_changes_other_changes(self):
self.obj.text = 'text2'
self.assertDictEqual({'text': 'text2'},
self.obj.cinder_obj_get_changes())
def test_cinder_obj_get_changes_datetime_no_tz(self):
now = datetime.datetime.utcnow()
self.obj.scheduled_at = now
self.assertDictEqual({'scheduled_at': now},
self.obj.cinder_obj_get_changes())
def test_cinder_obj_get_changes_datetime_tz_utc(self):
now_tz = iso8601.parse_date('2015-06-26T22:00:01Z')
now = now_tz.replace(tzinfo=None)
self.obj.scheduled_at = now_tz
self.assertDictEqual({'scheduled_at': now},
self.obj.cinder_obj_get_changes())
def test_cinder_obj_get_changes_datetime_tz_non_utc_positive(self):
now_tz = iso8601.parse_date('2015-06-26T22:00:01+01')
now = now_tz.replace(tzinfo=None) - datetime.timedelta(hours=1)
self.obj.scheduled_at = now_tz
self.assertDictEqual({'scheduled_at': now},
self.obj.cinder_obj_get_changes())
def test_cinder_obj_get_changes_datetime_tz_non_utc_negative(self):
now_tz = iso8601.parse_date('2015-06-26T10:00:01-05')
now = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5)
self.obj.scheduled_at = now_tz
self.assertDictEqual({'scheduled_at': now},
self.obj.cinder_obj_get_changes())
@mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id')
def test_refresh(self, get_by_id):
@objects.base.CinderObjectRegistry.register_if(False)
class MyTestObject(objects.base.CinderObject,
objects.base.CinderObjectDictCompat,
objects.base.CinderComparableObject,
objects.base.CinderPersistentObject):
fields = {'id': fields.UUIDField(),
'name': fields.StringField()}
test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo')
refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar')
get_by_id.return_value = refresh_obj
test_obj.refresh()
self._compare(self, refresh_obj, test_obj)
@mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id')
def test_refresh_readonly(self, get_by_id_mock):
@objects.base.CinderObjectRegistry.register_if(False)
class MyTestObject(objects.base.CinderObject,
objects.base.CinderObjectDictCompat,
objects.base.CinderComparableObject,
objects.base.CinderPersistentObject):
fields = {'id': fields.UUIDField(),
'name': fields.StringField(read_only=True)}
test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo')
refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar')
get_by_id_mock.return_value = refresh_obj
test_obj.refresh()
self._compare(self, refresh_obj, test_obj)
def test_refresh_no_id_field(self):
@objects.base.CinderObjectRegistry.register_if(False)
class MyTestObjectNoId(objects.base.CinderObject,
objects.base.CinderObjectDictCompat,
objects.base.CinderComparableObject,
objects.base.CinderPersistentObject):
fields = {'uuid': fields.UUIDField()}
test_obj = MyTestObjectNoId(uuid=fake.OBJECT_ID, name='foo')
self.assertRaises(NotImplementedError, test_obj.refresh)
@mock.patch('cinder.objects.base.objects', mock.Mock())
def test_cls_init(self):
"""Test that class init method gets called on registration."""
@objects.base.CinderObjectRegistry.register
class MyTestObject(objects.base.CinderObject,
objects.base.CinderPersistentObject):
cinder_ovo_cls_init = mock.Mock()
MyTestObject.cinder_ovo_cls_init.assert_called_once_with()
class TestCinderComparableObject(test_objects.BaseObjectsTestCase):
def test_comparable_objects(self):
@objects.base.CinderObjectRegistry.register
class MyComparableObj(objects.base.CinderObject,
objects.base.CinderObjectDictCompat,
objects.base.CinderComparableObject):
fields = {'foo': fields.Field(fields.Integer())}
class NonVersionedObject(object):
pass
obj1 = MyComparableObj(foo=1)
obj2 = MyComparableObj(foo=1)
obj3 = MyComparableObj(foo=2)
obj4 = NonVersionedObject()
self.assertTrue(obj1 == obj2)
self.assertFalse(obj1 == obj3)
self.assertFalse(obj1 == obj4)
self.assertNotEqual(obj1, None)
class TestCinderObjectConditionalUpdate(test.TestCase):
def setUp(self):
super(TestCinderObjectConditionalUpdate, self).setUp()
self.context = context.get_admin_context()
def _create_volume(self):
vol = {
'display_description': 'Test Desc',
'size': 1,
'status': 'available',
'availability_zone': 'az',
'host': 'dummy',
'attach_status': 'no',
}
volume = objects.Volume(context=self.context, **vol)
volume.create()
return volume
def _create_snapshot(self, volume):
snapshot = objects.Snapshot(context=self.context, volume_id=volume.id)
snapshot.create()
return snapshot
def _check_volume(self, volume, status, size, reload=False, dirty_keys=(),
**kwargs):
if reload:
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual(status, volume.status)
self.assertEqual(size, volume.size)
dirty = volume.cinder_obj_get_changes()
self.assertEqual(list(dirty_keys), list(dirty.keys()))
for key, value in kwargs.items():
self.assertEqual(value, getattr(volume, key))
def test_conditional_update_non_iterable_expected(self):
volume = self._create_volume()
# We also check that we can check for None values
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': 'available', 'migration_status': None}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
def test_conditional_update_non_iterable_expected_model_field(self):
volume = self._create_volume()
# We also check that we can check for None values
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2,
'previous_status': volume.model.status},
{'status': 'available', 'migration_status': None}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2, previous_status='available')
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True,
previous_status='available')
def test_conditional_update_non_iterable_expected_save_all(self):
volume = self._create_volume()
volume.size += 1
# We also check that we can check for not None values
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available', 'availability_zone': volume.Not(None)},
save_all=True))
# Check that the object in memory has been updated and that the size
# is not a dirty key
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
def test_conditional_update_non_iterable_expected_dont_save_all(self):
volume = self._create_volume()
volume.size += 1
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available'}, save_all=False))
# Check that the object in memory has been updated with the new status
# but that size has not been saved and is a dirty key
self._check_volume(volume, 'deleting', 2, False, ['size'])
# Check that the volume in the DB also has been updated but not the
# size
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_fail_non_iterable_expected_save_all(self):
volume = self._create_volume()
volume.size += 1
self.assertFalse(volume.conditional_update(
{'status': 'available'},
{'status': 'deleting'}, save_all=True))
# Check that the object in memory has not been updated and that the
# size is still a dirty key
self._check_volume(volume, 'available', 2, False, ['size'])
# Check that the volume in the DB hasn't been updated
self._check_volume(volume, 'available', 1, True)
def test_default_conditional_update_non_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update({'status': 'deleting'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_default_conditional_fail_update_non_iterable_expected(self):
volume_in_db = self._create_volume()
volume = objects.Volume.get_by_id(self.context, volume_in_db.id)
volume_in_db.size += 1
volume_in_db.save()
# This will fail because size in DB is different
self.assertFalse(volume.conditional_update({'status': 'deleting'}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed the status but has
# the size we changed before the conditional update
self._check_volume(volume_in_db, 'available', 2, True)
def test_default_conditional_update_non_iterable_expected_with_dirty(self):
volume_in_db = self._create_volume()
volume = objects.Volume.get_by_id(self.context, volume_in_db.id)
volume_in_db.size += 1
volume_in_db.save()
volume.size = 33
# This will fail because even though we have excluded the size from
# the default condition when we dirtied it in the volume object, we
# still have the last update timestamp that will be included in the
# condition
self.assertFalse(volume.conditional_update({'status': 'deleting'}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 33, False, ['size'])
# Check that the volume in the DB hasn't changed the status but has
# the size we changed before the conditional update
self._check_volume(volume_in_db, 'available', 2, True)
def test_conditional_update_negated_non_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': db.Not('in-use'), 'size': db.Not(2)}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
def test_conditional_update_non_iterable_expected_filter(self):
# Volume we want to change
volume = self._create_volume()
# Another volume that has no snapshots
volume2 = self._create_volume()
# A volume with snapshots
volume3 = self._create_volume()
self._create_snapshot(volume3)
# Update only if it has no snapshots
filters = (~sql.exists().where(
models.Snapshot.volume_id == models.Volume.id),)
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': 'available'},
filters))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
# Check that the other volumes in the DB haven't changed
self._check_volume(volume2, 'available', 1, True)
self._check_volume(volume3, 'available', 1, True)
def test_conditional_update_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 20},
{'status': ('error', 'available'), 'size': range(10)}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 20)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 20, True)
def test_conditional_update_negated_iterable_expected(self):
volume = self._create_volume()
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 20},
{'status': db.Not(('creating', 'in-use')), 'size': range(10)}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 20)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 20, True)
def test_conditional_update_fail_non_iterable_expected(self):
volume = self._create_volume()
self.assertFalse(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available', 'size': 2}))
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_negated_non_iterable_expected(self):
volume = self._create_volume()
result = volume.conditional_update({'status': 'deleting'},
{'status': db.Not('in-use'),
'size': 2})
self.assertFalse(result)
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_iterable_expected(self):
volume = self._create_volume()
self.assertFalse(volume.conditional_update(
{'status': 'available'},
{'status': ('error', 'creating'), 'size': range(2, 10)}))
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_negated_iterable_expected(self):
volume = self._create_volume()
self.assertFalse(volume.conditional_update(
{'status': 'error'},
{'status': db.Not(('available', 'in-use')), 'size': range(2, 10)}))
# Check that the object in memory hasn't changed
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB hasn't changed either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_fail_non_iterable_expected_filter(self):
# Volume we want to change
volume = self._create_volume()
self._create_snapshot(volume)
# A volume that has no snapshots
volume2 = self._create_volume()
# Another volume with snapshots
volume3 = self._create_volume()
self._create_snapshot(volume3)
# Update only if it has no snapshots
filters = (~sql.exists().where(
models.Snapshot.volume_id == models.Volume.id),)
self.assertFalse(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': 'available'},
filters))
# Check that the object in memory hasn't been updated
self._check_volume(volume, 'available', 1)
# Check that no volume in the DB also has been updated
self._check_volume(volume, 'available', 1, True)
self._check_volume(volume2, 'available', 1, True)
self._check_volume(volume3, 'available', 1, True)
def test_conditional_update_non_iterable_case_value(self):
# Volume we want to change and has snapshots
volume = self._create_volume()
self._create_snapshot(volume)
# Filter that checks if a volume has snapshots
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
# We want the updated value to depend on whether it has snapshots or
# not
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
self.assertTrue(volume.conditional_update({'status': case_values},
{'status': 'available'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'has-snapshot', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'has-snapshot', 1, True)
def test_conditional_update_non_iterable_case_value_else(self):
# Volume we want to change
volume = self._create_volume()
# Filter that checks if a volume has snapshots
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
# We want the updated value to depend on whether it has snapshots or
# not
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
self.assertTrue(volume.conditional_update({'status': case_values},
{'status': 'available'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'no-snapshot', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'no-snapshot', 1, True)
def test_conditional_update_non_iterable_case_value_fail(self):
# Volume we want to change doesn't have snapshots
volume = self._create_volume()
# Filter that checks if a volume has snapshots
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
# We want the updated value to depend on whether it has snapshots or
# not
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
# We won't update because volume status is available
self.assertFalse(volume.conditional_update({'status': case_values},
{'status': 'deleting'}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 1)
# Check that the volume in the DB also hasn't been updated either
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_iterable_with_none_expected(self):
volume = self._create_volume()
# We also check that we can check for None values in an iterable
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': (None, 'available'),
'migration_status': (None, 'finished')}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_iterable_with_not_none_expected(self):
volume = self._create_volume()
# We also check that we can check for None values in a negated iterable
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': volume.Not((None, 'in-use'))}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_iterable_with_not_includes_null(self):
volume = self._create_volume()
# We also check that negation includes None values by default like we
# do in Python and not like MySQL does
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available',
'migration_status': volume.Not(('migrating', 'error'))}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 1)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 1, True)
def test_conditional_update_iterable_with_not_includes_null_fails(self):
volume = self._create_volume()
# We also check that negation excludes None values if we ask it to
self.assertFalse(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available',
'migration_status': volume.Not(('migrating', 'error'),
auto_none=False)}))
# Check that the object in memory has not been updated
self._check_volume(volume, 'available', 1, False)
# Check that the volume in the DB hasn't been updated
self._check_volume(volume, 'available', 1, True)
def test_conditional_update_use_operation_in_value(self):
volume = self._create_volume()
expected_size = volume.size + 1
# We also check that using fields in requested changes will work as
# expected
self.assertTrue(volume.conditional_update(
{'status': 'deleting',
'size': volume.model.size + 1},
{'status': 'available'}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', expected_size, False)
# Check that the volume in the DB has also been updated
self._check_volume(volume, 'deleting', expected_size, True)
def test_conditional_update_auto_order(self):
volume = self._create_volume()
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
values = {'status': 'deleting',
'previous_status': volume.model.status,
'migration_status': case_values}
with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
update = model_query.return_value.filter.return_value.update
update.return_value = 0
self.assertFalse(volume.conditional_update(
values, {'status': 'available'}))
# We check that we are passing values to update to SQLAlchemy in the
# right order
self.assertEqual(1, update.call_count)
self.assertListEqual(
[('previous_status', volume.model.status),
('migration_status', mock.ANY),
('status', 'deleting')],
list(update.call_args[0][0]))
self.assertDictEqual(
{'synchronize_session': False,
'update_args': {'preserve_parameter_order': True}},
update.call_args[1])
def test_conditional_update_force_order(self):
volume = self._create_volume()
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
values = {'status': 'deleting',
'previous_status': volume.model.status,
'migration_status': case_values}
order = ['status']
with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
update = model_query.return_value.filter.return_value.update
update.return_value = 0
self.assertFalse(volume.conditional_update(
values, {'status': 'available'}, order=order))
# We check that we are passing values to update to SQLAlchemy in the
# right order
self.assertEqual(1, update.call_count)
self.assertListEqual(
[('status', 'deleting'),
('previous_status', volume.model.status),
('migration_status', mock.ANY)],
list(update.call_args[0][0]))
self.assertDictEqual(
{'synchronize_session': False,
'update_args': {'preserve_parameter_order': True}},
update.call_args[1])
def test_conditional_update_no_order(self):
volume = self._create_volume()
values = {'status': 'deleting',
'previous_status': 'available',
'migration_status': None}
with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
update = model_query.return_value.filter.return_value.update
update.return_value = 0
self.assertFalse(volume.conditional_update(
values, {'status': 'available'}))
# Check that arguments passed to SQLAlchemy's update are correct (order
# is not relevant).
self.assertEqual(1, update.call_count)
arg = update.call_args[0][0]
self.assertIsInstance(arg, dict)
self.assertEqual(set(values.keys()), set(arg.keys()))
def test_conditional_update_multitable_fail(self):
volume = self._create_volume()
self.assertRaises(exception.ProgrammingError,
volume.conditional_update,
{'status': 'deleting',
objects.Snapshot.model.status: 'available'},
{'status': 'available'})
def test_conditional_update_multitable_fail_fields_different_models(self):
volume = self._create_volume()
self.assertRaises(exception.ProgrammingError,
volume.conditional_update,
{objects.Backup.model.status: 'available',
objects.Snapshot.model.status: 'available'})
def test_conditional_update_not_multitable(self):
volume = self._create_volume()
with mock.patch('cinder.db.sqlalchemy.api._create_facade_lazily') as m:
res = volume.conditional_update(
{objects.Volume.model.status: 'deleting',
objects.Volume.model.size: 12}, reflect_changes=False)
self.assertTrue(res)
self.assertTrue(m.called)
class TestCinderDictObject(test_objects.BaseObjectsTestCase):
@objects.base.CinderObjectRegistry.register_if(False)
class TestDictObject(objects.base.CinderObjectDictCompat,
objects.base.CinderObject):
obj_extra_fields = ['foo']
fields = {
'abc': fields.StringField(nullable=True),
'def': fields.IntegerField(nullable=True),
}
@property
def foo(self):
return 42
def test_dict_objects(self):
obj = self.TestDictObject()
self.assertNotIn('non_existing', obj)
self.assertEqual('val', obj.get('abc', 'val'))
self.assertNotIn('abc', obj)
obj.abc = 'val2'
self.assertEqual('val2', obj.get('abc', 'val'))
self.assertEqual(42, obj.get('foo'))
self.assertEqual(42, obj.get('foo', None))
self.assertIn('foo', obj)
self.assertIn('abc', obj)
self.assertNotIn('def', obj)
@mock.patch('cinder.objects.base.OBJ_VERSIONS', fake_objects.MyHistory())
class TestCinderObjectSerializer(test_objects.BaseObjectsTestCase):
BACKPORT_MSG = ('Backporting %(obj_name)s from version %(src_vers)s to '
'version %(dst_vers)s')
def setUp(self):
super(TestCinderObjectSerializer, self).setUp()
self.obj = fake_objects.ChildObject(scheduled_at=None,
uuid=uuid.uuid4(),
text='text',
integer=1)
self.parent = fake_objects.ParentObject(uuid=uuid.uuid4(),
child=self.obj,
scheduled_at=None)
self.parent_list = fake_objects.ParentObjectList(objects=[self.parent])
def test_serialize_init_current_has_no_manifest(self):
"""Test that pinned to current version we have no manifest."""
serializer = objects.base.CinderObjectSerializer('1.6')
# Serializer should not have a manifest
self.assertIsNone(serializer.manifest)
def test_serialize_init_no_cap_has_no_manifest(self):
"""Test that without cap we have no manifest."""
serializer = objects.base.CinderObjectSerializer()
# Serializer should not have a manifest
self.assertIsNone(serializer.manifest)
def test_serialize_init_pinned_has_manifest(self):
"""Test that pinned to older version we have manifest."""
objs_version = '1.5'
serializer = objects.base.CinderObjectSerializer(objs_version)
# Serializer should have the right manifest
self.assertDictEqual(fake_objects.MyHistory()[objs_version],
serializer.manifest)
def test_serialize_entity_unknown_version(self):
"""Test that bad cap version will prevent serializer creation."""
self.assertRaises(exception.CappedVersionUnknown,
objects.base.CinderObjectSerializer, '0.9')
@mock.patch('cinder.objects.base.LOG.debug')
def test_serialize_entity_basic_no_backport(self, log_debug_mock):
"""Test single element serializer with no backport."""
serializer = objects.base.CinderObjectSerializer('1.6')
primitive = serializer.serialize_entity(self.context, self.obj)
self.assertEqual('1.2', primitive['versioned_object.version'])
data = primitive['versioned_object.data']
self.assertEqual(1, data['integer'])
self.assertEqual('text', data['text'])
log_debug_mock.assert_not_called()
@mock.patch('cinder.objects.base.LOG.debug')
def test_serialize_entity_basic_backport(self, log_debug_mock):
"""Test single element serializer with backport."""
serializer = objects.base.CinderObjectSerializer('1.5')
primitive = serializer.serialize_entity(self.context, self.obj)
self.assertEqual('1.1', primitive['versioned_object.version'])
data = primitive['versioned_object.data']
self.assertNotIn('integer', data)
self.assertEqual('text', data['text'])
log_debug_mock.assert_called_once_with(self.BACKPORT_MSG,
{'obj_name': 'ChildObject',
'src_vers': '1.2',
'dst_vers': '1.1'})
@mock.patch('cinder.objects.base.LOG.debug')
def test_serialize_entity_full_no_backport(self, log_debug_mock):
"""Test related elements serialization with no backport."""
serializer = objects.base.CinderObjectSerializer('1.6')
primitive = serializer.serialize_entity(self.context, self.parent_list)
self.assertEqual('1.1', primitive['versioned_object.version'])
parent = primitive['versioned_object.data']['objects'][0]
self.assertEqual('1.1', parent['versioned_object.version'])
child = parent['versioned_object.data']['child']
self.assertEqual('1.2', child['versioned_object.version'])
log_debug_mock.assert_not_called()
@mock.patch('cinder.objects.base.LOG.debug')
def test_serialize_entity_full_backport_last_children(self,
log_debug_mock):
"""Test related elements serialization with backport of the last child.
Test that using the manifest we properly backport a child object even
when all its parents have not changed their version.
"""
serializer = objects.base.CinderObjectSerializer('1.5')
primitive = serializer.serialize_entity(self.context, self.parent_list)
self.assertEqual('1.1', primitive['versioned_object.version'])
parent = primitive['versioned_object.data']['objects'][0]
self.assertEqual('1.1', parent['versioned_object.version'])
# Only the child has been backported
child = parent['versioned_object.data']['child']
self.assertEqual('1.1', child['versioned_object.version'])
# Check that the backport has been properly done
data = child['versioned_object.data']
self.assertNotIn('integer', data)
self.assertEqual('text', data['text'])
log_debug_mock.assert_called_once_with(self.BACKPORT_MSG,
{'obj_name': 'ChildObject',
'src_vers': '1.2',
'dst_vers': '1.1'})
@mock.patch('cinder.objects.base.LOG.debug')
def test_serialize_entity_full_backport(self, log_debug_mock):
"""Test backport of the whole tree of related elements."""
serializer = objects.base.CinderObjectSerializer('1.3')
primitive = serializer.serialize_entity(self.context, self.parent_list)
# List has been backported
self.assertEqual('1.0', primitive['versioned_object.version'])
parent = primitive['versioned_object.data']['objects'][0]
# Parent has been backported as well
self.assertEqual('1.0', parent['versioned_object.version'])
# And the backport has been properly done
data = parent['versioned_object.data']
self.assertNotIn('scheduled_at', data)
# And child as well
child = parent['versioned_object.data']['child']
self.assertEqual('1.1', child['versioned_object.version'])
# Check that the backport has been properly done
data = child['versioned_object.data']
self.assertNotIn('integer', data)
self.assertEqual('text', data['text'])
log_debug_mock.assert_has_calls([
mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObjectList',
'src_vers': '1.1',
'dst_vers': '1.0'}),
mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObject',
'src_vers': '1.1',
'dst_vers': '1.0'}),
mock.call(self.BACKPORT_MSG, {'obj_name': 'ChildObject',
'src_vers': '1.2',
'dst_vers': '1.1'})])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from senlin.common import consts
from senlin.common import exception
from senlin.engine import cluster_policy as cpm
from senlin.engine import health_manager
from senlin.engine import node as node_mod
from senlin.objects import cluster as co
from senlin.objects import cluster_policy as cpo
from senlin.objects import node as no
from senlin.policies import base as pcb
from senlin.profiles import base as pfb
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Cluster(object):
"""A cluster is a collection of objects of the same profile type.
All operations are performed without further checking because the
checks are assumed to be done before, during, or after an action is
executed.
"""
def __init__(self, name, desired_capacity, profile_id,
context=None, **kwargs):
"""Initialize a cluster object.
The cluster defaults to having 0 nodes and no profile assigned.
"""
self.id = kwargs.get('id', None)
self.name = name
self.profile_id = profile_id
# Initialize the fields using kwargs passed in
self.user = kwargs.get('user', '')
self.project = kwargs.get('project', '')
self.domain = kwargs.get('domain', '')
self.init_at = kwargs.get('init_at', None)
self.created_at = kwargs.get('created_at', None)
self.updated_at = kwargs.get('updated_at', None)
self.min_size = (kwargs.get('min_size') or
consts.CLUSTER_DEFAULT_MIN_SIZE)
self.max_size = (kwargs.get('max_size') or
consts.CLUSTER_DEFAULT_MAX_SIZE)
self.desired_capacity = desired_capacity
self.next_index = kwargs.get('next_index', 1)
self.timeout = (kwargs.get('timeout') or
cfg.CONF.default_action_timeout)
self.status = kwargs.get('status', consts.CS_INIT)
self.status_reason = kwargs.get('status_reason', 'Initializing')
self.data = kwargs.get('data', {})
self.metadata = kwargs.get('metadata') or {}
self.dependents = kwargs.get('dependents') or {}
self.config = kwargs.get('config') or {}
# rt is a dict for runtime data
self.rt = {
'profile': None,
'nodes': [],
'policies': []
}
if context is not None:
self._load_runtime_data(context)
def _load_runtime_data(self, context):
if self.id is None:
return
policies = []
bindings = cpo.ClusterPolicy.get_all(context, self.id)
for b in bindings:
# Load each policy object attached to this cluster
policy = pcb.Policy.load(context, b.policy_id, project_safe=False)
policies.append(policy)
self.rt = {
'profile': pfb.Profile.load(context,
profile_id=self.profile_id,
project_safe=False),
'nodes': no.Node.get_all_by_cluster(context, self.id),
'policies': policies
}
def store(self, context):
"""Store the cluster in database and return its ID.
If the ID already exists, we do an update.
"""
values = {
'name': self.name,
'profile_id': self.profile_id,
'user': self.user,
'project': self.project,
'domain': self.domain,
'init_at': self.init_at,
'created_at': self.created_at,
'updated_at': self.updated_at,
'min_size': self.min_size,
'max_size': self.max_size,
'desired_capacity': self.desired_capacity,
'next_index': self.next_index,
'timeout': self.timeout,
'status': self.status,
'status_reason': self.status_reason,
'meta_data': self.metadata,
'data': self.data,
'dependents': self.dependents,
'config': self.config,
}
timestamp = timeutils.utcnow(True)
if self.id:
values['updated_at'] = timestamp
co.Cluster.update(context, self.id, values)
else:
self.init_at = timestamp
values['init_at'] = timestamp
cluster = co.Cluster.create(context, values)
self.id = cluster.id
self._load_runtime_data(context)
return self.id
@classmethod
def _from_object(cls, context, obj):
"""Construct a cluster from database object.
:param context: the context used for DB operations;
:param obj: a DB cluster object providing values for all fields;
"""
kwargs = {
'id': obj.id,
'user': obj.user,
'project': obj.project,
'domain': obj.domain,
'init_at': obj.init_at,
'created_at': obj.created_at,
'updated_at': obj.updated_at,
'min_size': obj.min_size,
'max_size': obj.max_size,
'next_index': obj.next_index,
'timeout': obj.timeout,
'status': obj.status,
'status_reason': obj.status_reason,
'data': obj.data,
'metadata': obj.metadata,
'dependents': obj.dependents,
'config': obj.config,
}
return cls(obj.name, obj.desired_capacity, obj.profile_id,
context=context, **kwargs)
@classmethod
def load(cls, context, cluster_id=None, dbcluster=None, project_safe=True):
"""Retrieve a cluster from database."""
if dbcluster is None:
dbcluster = co.Cluster.get(context, cluster_id,
project_safe=project_safe)
if dbcluster is None:
raise exception.ResourceNotFound(type='cluster', id=cluster_id)
return cls._from_object(context, dbcluster)
@classmethod
def load_all(cls, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""Retrieve all clusters from database."""
objs = co.Cluster.get_all(context, limit=limit, marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
for obj in objs:
cluster = cls._from_object(context, obj)
yield cluster
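# Illustrative sketch (not part of the original code): iterating over the
# clusters visible to a request context. `ctx` is assumed to be a valid
# RequestContext obtained elsewhere; the limit and sort key are arbitrary.
#
#   for cluster in Cluster.load_all(ctx, limit=10, sort='name'):
#       LOG.debug('cluster %s has %d nodes',
#                 cluster.name, len(cluster.nodes))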
def set_status(self, context, status, reason=None, **kwargs):
"""Set status of the cluster.
:param context: A DB session for accessing the backend database.
:param status: A string providing the new status of the cluster.
:param reason: A string containing the reason for the status change.
It can be omitted when invoking this method.
:param dict kwargs: Other optional attributes to be updated.
:returns: Nothing.
"""
values = {}
now = timeutils.utcnow(True)
if status == consts.CS_ACTIVE and self.status == consts.CS_CREATING:
self.created_at = now
values['created_at'] = now
elif (status == consts.CS_ACTIVE and
self.status in (consts.CS_UPDATING, consts.CS_RESIZING)):
self.updated_at = now
values['updated_at'] = now
self.status = status
values['status'] = status
if reason:
self.status_reason = reason
values['status_reason'] = reason
for k, v in kwargs.items():
if hasattr(self, k):
setattr(self, k, v)
values[k] = v
# There is a possibility that the profile id is changed
if 'profile_id' in values:
profile = pfb.Profile.load(context, profile_id=self.profile_id)
self.rt['profile'] = profile
co.Cluster.update(context, self.id, values)
return
def do_create(self, context, **kwargs):
"""Additional logic at the beginning of cluster creation process.
Set cluster status to CREATING.
"""
if self.status != consts.CS_INIT:
LOG.error('Cluster is in status "%s"', self.status)
return False
self.set_status(context, consts.CS_CREATING, 'Creation in progress')
try:
pfb.Profile.create_cluster_object(context, self)
except exception.EResourceCreation as ex:
self.set_status(context, consts.CS_ERROR, str(ex))
return False
return True
def do_delete(self, context, **kwargs):
"""Additional logic at the end of cluster deletion process."""
self.set_status(context, consts.CS_DELETING, 'Deletion in progress')
try:
pfb.Profile.delete_cluster_object(context, self)
except exception.EResourceDeletion as ex:
self.set_status(context, consts.CS_ERROR, str(ex))
return False
co.Cluster.delete(context, self.id)
return True
def do_update(self, context, **kwargs):
"""Additional logic at the beginning of cluster updating progress.
This method is intended to be called only from an action.
"""
self.set_status(context, consts.CS_UPDATING, 'Update in progress')
return True
def do_check(self, context, **kwargs):
"""Additional logic at the beginning of cluster checking process.
Set cluster status to CHECKING.
"""
self.set_status(context, consts.CS_CHECKING, 'Check in progress')
return True
def do_recover(self, context, **kwargs):
"""Additional logic at the beginning of cluster recovering process.
Set cluster status to RECOVERING.
"""
self.set_status(context, consts.CS_RECOVERING, 'Recovery in progress')
return True
def do_operation(self, context, **kwargs):
"""Additional logic at the beginning of cluster recovering process.
Set cluster status to OPERATING.
"""
operation = kwargs.get("operation", "unknown")
self.set_status(context, consts.CS_OPERATING,
"Operation %s in progress" % operation)
return True
def attach_policy(self, ctx, policy_id, values):
"""Attach policy object to the cluster.
Note this method MUST be called with the cluster locked.
:param ctx: A context for DB operation.
:param policy_id: ID of the policy object.
:param values: Optional dictionary containing binding properties.
:returns: A tuple containing a boolean result and a reason string.
"""
policy = pcb.Policy.load(ctx, policy_id)
# Check if policy has already been attached
for existing in self.rt['policies']:
# Policy already attached
if existing.id == policy_id:
return True, 'Policy already attached.'
# Detect policy type conflicts
if (existing.type == policy.type) and policy.singleton:
reason = ("Only one instance of policy type (%(ptype)s) can "
"be attached to a cluster, but another instance "
"(%(existing)s) is found attached to the cluster "
"(%(cluster)s) already."
) % {'ptype': policy.type,
'existing': existing.id,
'cluster': self.id}
return False, reason
# invoke policy callback
enabled = bool(values.get('enabled', True))
res, data = policy.attach(self, enabled=enabled)
if not res:
return False, data
kwargs = {
'enabled': enabled,
'data': data,
'priority': policy.PRIORITY
}
cp = cpm.ClusterPolicy(self.id, policy_id, **kwargs)
cp.store(ctx)
# refresh cached runtime
self.rt['policies'].append(policy)
return True, 'Policy attached.'
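# Illustrative sketch (not part of the original code): attaching a policy and
# checking the result. `ctx` and `policy_id` are assumed to exist; the binding
# values shown are arbitrary.
#
#   attached, reason = cluster.attach_policy(ctx, policy_id,
#                                            {'enabled': True})
#   if not attached:
#       LOG.warning('Policy attach failed: %s', reason)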
def update_policy(self, ctx, policy_id, **values):
"""Update a policy that is already attached to a cluster.
Note this method must be called with the cluster locked.
:param ctx: A context for DB operation.
:param policy_id: ID of the policy object.
:param values: Optional dictionary containing new binding properties.
:returns: A tuple containing a boolean result and a string reason.
"""
# Check if policy has already been attached
found = False
for existing in self.policies:
if existing.id == policy_id:
found = True
break
if not found:
return False, 'Policy not attached.'
enabled = values.get('enabled', None)
if enabled is None:
return True, 'No update is needed.'
params = {'enabled': bool(enabled)}
# disable health check if necessary
policy_type = existing.type.split('-')[0]
if policy_type == 'senlin.policy.health':
if enabled is True:
health_manager.enable(self.id)
else:
health_manager.disable(self.id)
cpo.ClusterPolicy.update(ctx, self.id, policy_id, params)
return True, 'Policy updated.'
def detach_policy(self, ctx, policy_id):
"""Detach policy object from the cluster.
Note this method MUST be called with the cluster locked.
:param ctx: A context for DB operation.
:param policy_id: ID of the policy object.
:returns: A tuple containing a boolean result and a reason string.
"""
# Check if policy has already been attached
found = None
for existing in self.policies:
if existing.id == policy_id:
found = existing
break
if found is None:
return False, 'Policy not attached.'
policy = pcb.Policy.load(ctx, policy_id)
res, reason = policy.detach(self)
if not res:
return res, reason
cpo.ClusterPolicy.delete(ctx, self.id, policy_id)
self.rt['policies'].remove(found)
return True, 'Policy detached.'
@property
def nodes(self):
return self.rt['nodes']
def add_node(self, node):
"""Append specified node to the cluster cache.
:param node: The node to become a new member of the cluster.
"""
self.rt['nodes'].append(node)
def remove_node(self, node_id):
"""Remove node with specified ID from cache.
:param node_id: ID of the node to be removed from cache.
"""
for node in self.rt['nodes']:
if node.id == node_id:
self.rt['nodes'].remove(node)
def update_node(self, nodes):
"""Update cluster runtime data
:param nodes: List of node objects
"""
self.rt['nodes'] = nodes
@property
def policies(self):
return self.rt['policies']
def get_region_distribution(self, regions):
"""Get node distribution regarding given regions.
:param regions: list of region names to check.
:return: a dict containing region and number as key value pairs.
"""
dist = dict.fromkeys(regions, 0)
for node in self.nodes:
placement = node.data.get('placement', {})
if placement:
region = placement.get('region_name', None)
if region and region in regions:
dist[region] += 1
return dist
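# Illustrative sketch (not part of the original code): with nodes placed in
# 'RegionOne' and 'RegionTwo', a call such as
#
#   dist = cluster.get_region_distribution(
#       ['RegionOne', 'RegionTwo', 'RegionThree'])
#
# might return {'RegionOne': 3, 'RegionTwo': 1, 'RegionThree': 0}; regions
# without any placed nodes stay at zero.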
def get_zone_distribution(self, ctx, zones):
"""Get node distribution regarding the given the availability zones.
The availability zone information is only available for some profiles.
:param ctx: context used to access node details.
:param zones: list of zone names to check.
:returns: a dict containing zone and number as key-value pairs.
"""
dist = dict.fromkeys(zones, 0)
for node in self.nodes:
placement = node.data.get('placement', {})
if placement and 'zone' in placement:
zone = placement['zone']
dist[zone] += 1
else:
details = node.get_details(ctx)
zname = details.get('OS-EXT-AZ:availability_zone', None)
if zname and zname in dist:
dist[zname] += 1
return dist
def nodes_by_region(self, region):
"""Get list of nodes that belong to the specified region.
:param region: Name of region for filtering.
:return: A list of nodes that are from the specified region.
"""
result = []
for node in self.nodes:
placement = node.data.get('placement', {})
if placement and 'region_name' in placement:
if region == placement['region_name']:
result.append(node)
return result
def nodes_by_zone(self, zone):
"""Get list of nodes that reside in the specified availability zone.
:param zone: Name of availability zone for filtering.
:return: A list of nodes that reside in the specified AZ.
"""
result = []
for node in self.nodes:
placement = node.data.get('placement', {})
if placement and 'zone' in placement:
if zone == placement['zone']:
result.append(node)
return result
def health_check(self, ctx):
"""Check physical resources status
:param ctx: The context to operate node object
"""
# Note this procedure is a purely sequential operation;
# it is not suitable for large-scale clusters.
old_nodes = self.nodes
for node in old_nodes:
node.do_check(ctx)
nodes = node_mod.Node.load_all(ctx, cluster_id=self.id)
self.update_node([n for n in nodes])
def eval_status(self, ctx, operation, **params):
"""Re-evaluate cluster's health status.
:param ctx: The requesting context.
:param operation: The operation that triggers this status evaluation.
:returns: ``None``.
"""
nodes = node_mod.Node.load_all(ctx, cluster_id=self.id)
self.rt['nodes'] = [n for n in nodes]
active_count = 0
for node in self.nodes:
if node.status == consts.NS_ACTIVE:
active_count += 1
# get provided desired_capacity/min_size/max_size
desired = params.get('desired_capacity', self.desired_capacity)
min_size = params.get('min_size', self.min_size)
max_size = params.get('max_size', self.max_size)
values = params or {}
if active_count < min_size:
status = consts.CS_ERROR
reason = ("%(o)s: number of active nodes is below min_size "
"(%(n)d).") % {'o': operation, 'n': min_size}
elif active_count < desired:
status = consts.CS_WARNING
reason = ("%(o)s: number of active nodes is below "
"desired_capacity "
"(%(n)d).") % {'o': operation, 'n': desired}
elif max_size < 0 or active_count <= max_size:
status = consts.CS_ACTIVE
reason = ("%(o)s: number of active nodes is equal or above "
"desired_capacity "
"(%(n)d).") % {'o': operation, 'n': desired}
else:
status = consts.CS_WARNING
reason = ("%(o)s: number of active nodes is above max_size "
"(%(n)d).") % {'o': operation, 'n': max_size}
values.update({'status': status, 'status_reason': reason})
co.Cluster.update(ctx, self.id, values)
|
|
import json
import re
from collections import namedtuple
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy
from accounts.models import UserSettings
from accounts.payment_plans import FEATURE_MIN_SITE_FAVICON, FEATURE_MIN_SITES, minimum
from assets.fields import AssetField
from assets.models import Asset
from podcasts.models import Podcast
from pinecast.helpers import cached_method
GA_VALIDATOR = RegexValidator(r'^[0-9a-zA-Z\-]*$', ugettext_lazy('Only GA IDs are accepted'))
ITUNES_ID_EXTRACTOR = re.compile(r'id(\w+)')
LinkTuple = namedtuple('LinkTuple', ['title', 'url', 'model'])
saveable_settings = set([
'custom_cname', 'itunes_url', 'google_play_url', 'stitcher_url',
'show_itunes_banner', 'analytics_id',
])
class Site(models.Model):
SITE_THEMES = (
# Inspired by http://themepathra.tumblr.com/
('panther', ugettext_lazy('Panther')),
# Inspired by http://demo.themestation.net/podcaster/
('podcasty', ugettext_lazy('Podcasty')),
('zen', ugettext_lazy('Zen')),
('unstyled', ugettext_lazy('Unstyled')),
)
podcast = models.OneToOneField(Podcast)
theme = models.CharField(choices=SITE_THEMES, max_length=16)
theme_data = models.TextField(blank=True, default='')
custom_cname = models.CharField(blank=True, null=True, max_length=64)
cover_image = AssetField()
favicon = AssetField()
logo = AssetField()
itunes_url = models.URLField(blank=True, null=True, max_length=500)
google_play_url = models.URLField(blank=True, null=True, max_length=500)
stitcher_url = models.URLField(blank=True, null=True, max_length=500)
show_itunes_banner = models.BooleanField(default=False)
analytics_id = models.CharField(blank=True, null=True, max_length=32, validators=[GA_VALIDATOR])
@cached_method
def get_domain(self):
if not self.custom_cname:
return self.get_subdomain()
us = UserSettings.get_from_user(self.podcast.owner)
if not minimum(us.plan, FEATURE_MIN_SITES):
return self.get_subdomain()
return 'http://%s' % self.custom_cname
@cached_method
def get_subdomain(self):
return 'https://%s.pinecast.co' % self.podcast.slug
def get_cover_image_url(self):
if self.cover_image:
return self.cover_image.get_url()
return None
def get_banner_id(self):
if not self.show_itunes_banner:
return None
url = self.itunes_url
if not url:
return None
match = ITUNES_ID_EXTRACTOR.search(url)
if not match:
return None
return match.group(1)
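# Illustrative sketch (not part of the original code): given a hypothetical
# itunes_url such as 'https://itunes.apple.com/us/podcast/example/id123456789',
# ITUNES_ID_EXTRACTOR matches 'id123456789' and get_banner_id() returns
# '123456789' (provided show_itunes_banner is True).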
def set_theme_data(self, data, save=False):
try:
theme_parsed = json.loads(data.decode('utf-8'))
if not isinstance(theme_parsed, dict):
if settings.DEBUG:
print('Is not a dict')
return False
except Exception as e:
if settings.DEBUG:
raise e
return False
self.theme_data = json.dumps(theme_parsed)
if save:
self.save()
return True
def get_site_links(self):
return [
{
'title': link.title,
'url': link.url,
} for
link in
self.sitelink_set.all()
]
def set_site_links(self, data):
try:
if isinstance(data, bytes):
data = data.decode('utf-8')
parsed = json.loads(data)
if not isinstance(parsed, list) or any(not isinstance(x, dict) for x in parsed):
if settings.DEBUG:
print('Is not a list containing dicts')
return False
new_links = [
LinkTuple(link['title'][:256], link['url'][:500], None) for
link in
parsed if
isinstance(link.get('title'), str) and isinstance(link.get('url'), str)
]
except Exception as e:
if settings.DEBUG:
raise e
return False
old_links = [
LinkTuple(link.title, link.url, link) for
link in
self.sitelink_set.all()
]
for link in old_links:
if link not in new_links:
if settings.DEBUG: print('Deleting {}'.format(link))
link.model.delete()
for link in new_links:
if link not in old_links:
if settings.DEBUG: print('Adding {}'.format(link))
SiteLink(title=link.title, url=link.url, site=self).save()
return True
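# Illustrative sketch (not part of the original code): the payload accepted by
# set_site_links() is a JSON list of dicts with string 'title' and 'url' keys;
# titles and URLs are truncated to 256 and 500 characters respectively. The
# values below are hypothetical.
#
#   site.set_site_links(json.dumps([
#       {'title': 'Blog', 'url': 'https://example.com/blog'},
#       {'title': 'Twitter', 'url': 'https://twitter.com/example'},
#   ]))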
def set_settings(self, data):
try:
if isinstance(data, bytes):
data = data.decode('utf-8')
settings_parsed = json.loads(data)
if not isinstance(settings_parsed, dict):
if settings.DEBUG:
print('Is not a dict')
return False
for key, value in settings_parsed.items():
if key not in saveable_settings:
if settings.DEBUG:
print('{} not an available setting'.format(key))
return False
if value is not None:
if key != 'show_itunes_banner' and not isinstance(value, str):
if settings.DEBUG:
print('{} contains invalid value: {}'.format(key, value))
return False
elif key == 'show_itunes_banner' and not isinstance(value, bool):
if settings.DEBUG:
print('{} contains invalid value: {}'.format(key, value))
return False
except Exception as e:
if settings.DEBUG:
raise e
return False
for key, value in settings_parsed.items():
if key == 'show_itunes_banner':
value = value == True
if not settings_parsed.get('itunes_url', self.itunes_url):
value = False
setattr(self, 'show_itunes_banner', value)
continue
setattr(self, key, None if not value else value)
self.save()
return True
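    # Illustrative sketch (assumption about the payload shape): set_settings()
    # takes a JSON object whose keys must appear in the module-level
    # saveable_settings collection; values must be strings, except
    # show_itunes_banner, which must be a boolean, e.g.
    #
    #   site.set_settings(b'{"itunes_url": "https://example.com/show", "show_itunes_banner": true}')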
def set_assets(self, data):
us = UserSettings.get_from_user(self.podcast.owner)
try:
if isinstance(data, bytes):
data = data.decode('utf-8')
assets_parsed = json.loads(data)
if not isinstance(assets_parsed, dict):
if settings.DEBUG:
print('Is not a dict')
return False
for key, value in assets_parsed.items():
if key != 'site_logo' and key != 'site_favicon':
if settings.DEBUG:
print('Unacceptable key {}'.format(key))
return False
if (key == 'site_favicon' and
not minimum(us.plan, FEATURE_MIN_SITE_FAVICON)):
if settings.DEBUG:
print('Not allowed to set favicon')
return False
if not isinstance(value, str) and value is not None:
if settings.DEBUG:
print('Is not a string')
return False
except Exception as e:
if settings.DEBUG:
raise e
return False
for key, value in assets_parsed.items():
if key == 'site_logo':
if value:
self.logo = Asset.from_signed_s3_url(
signed_s3_url=value,
internal_type=key,
owner=self.podcast.owner,
).save_unique()
else:
if self.logo:
self.logo.deleted = True
self.logo.save()
self.logo = None
elif key == 'site_favicon':
if value:
self.favicon = Asset.from_signed_s3_url(
signed_s3_url=value,
internal_type=key,
owner=self.podcast.owner,
).save_unique()
else:
if self.favicon:
self.favicon.deleted = True
self.favicon.save()
self.favicon = None
if assets_parsed:
self.save()
return True
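    # Illustrative sketch (assumption): set_assets() accepts a JSON object with
    # only the keys 'site_logo' and/or 'site_favicon', each mapping to a signed
    # S3 URL string or null to clear the asset, e.g.
    #
    #   site.set_assets(b'{"site_logo": "<signed s3 url>", "site_favicon": null}')
    #
    # Changing the favicon is gated on the owner's plan via FEATURE_MIN_SITE_FAVICON.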
def __str__(self):
return '%s: %s' % (self.podcast.slug, self.podcast.name)
class SiteLink(models.Model):
site = models.ForeignKey(Site)
title = models.CharField(max_length=256)
url = models.URLField(blank=True, max_length=500)
PAGE_TYPES = (
('markdown', ugettext_lazy('Markdown')),
('hosts', ugettext_lazy('Hosts')),
('contact', ugettext_lazy('Contact')),
)
HOST_KEYS = (
'name',
'email',
'twitter',
'instagram',
'twitch',
'youtube',
'facebook',
'url',
)
CONTACT_KEYS = (
'email',
'twitter',
'facebook',
'instagram',
'twitch',
'youtube',
)
class SitePage(models.Model):
PAGE_TYPES = PAGE_TYPES
site = models.ForeignKey(Site)
title = models.CharField(max_length=256)
slug = models.SlugField()
page_type = models.CharField(choices=PAGE_TYPES, max_length=16)
created = models.DateTimeField(auto_now_add=True)
body = models.TextField()
def clean(self):
if not any(x[0] == self.page_type for x in PAGE_TYPES):
raise ValidationError('Unknown type')
if self.page_type == 'markdown':
return
parsed = None
try:
parsed = json.loads(self.body)
except Exception:
raise ValidationError('Cannot parse body')
if self.page_type == 'contact':
if not isinstance(parsed, dict):
raise ValidationError('Invalid body')
if not all(isinstance(v, str) for v in parsed.values()):
raise ValidationError('All keys must be strings')
if not all(len(v) <= 256 for v in parsed.values()):
raise ValidationError('Some input too long')
elif self.page_type == 'hosts':
if not isinstance(parsed, list):
raise ValidationError('Must be an array')
if not all(isinstance(v, dict) for v in parsed):
raise ValidationError('Must be an array of dicts')
for host in parsed:
for k, v in host.items():
if k not in HOST_KEYS:
raise ValidationError('Unexpected host key "{}"'.format(k))
if k == 'name' and not isinstance(v, str):
raise ValidationError('name must be string')
elif k != 'name':
if not isinstance(v, list):
raise ValidationError('non-name props must be arrays')
if not all(isinstance(x, str) for x in v):
raise ValidationError('non-name props must be arrays of strings')
if len(v) > 4:
raise ValidationError('Too many values for non-name prop')
if any(len(x) > 256 for x in v):
raise ValidationError('Non-name prop value too long')
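    # Illustrative bodies that pass clean() above (made-up example values):
    #   contact page: '{"email": "hi@example.com", "twitter": "@example"}'
    #   hosts page:   '[{"name": "Alex", "twitter": ["@alex"], "url": ["https://example.com"]}]'
    # Markdown pages store the raw Markdown body and are not validated here.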
@classmethod
def get_body_from_req(cls, req, page_type=None):
page_type = page_type or req.POST.get('page_type')
if page_type == 'markdown':
return req.POST.get('markdown_body')
elif page_type == 'hosts':
blob = []
try:
input_blob = json.loads(req.POST.get('host_blob'))
except Exception:
input_blob = []
for host in input_blob:
if not host.get('name'):
continue
host_blob = {'name': host.get('name')}
if 'email' in host:
host_blob['email'] = str(host.get('email'))[:64]
if 'twitter' in host:
host_blob['twitter'] = str(host.get('twitter'))[:32]
if 'instagram' in host:
host_blob['instagram'] = str(host.get('instagram'))[:32]
if 'twitch' in host:
host_blob['twitch'] = str(host.get('twitch'))[:32]
if 'youtube' in host:
host_blob['youtube'] = str(host.get('youtube'))[:32]
if 'facebook' in host:
host_blob['facebook'] = str(host.get('facebook'))[:256]
if 'url' in host:
host_blob['url'] = str(host.get('url'))[:256]
blob.append(host_blob)
return json.dumps(blob)
elif page_type == 'contact':
blob = {}
# I'm saving these all as arrays in case someday we want to allow
# multiple of each. Easy enough to subscript the array for now, and
# makes things forward-compatible.
if req.POST.get('contact_email'):
blob['email'] = [str(req.POST.get('contact_email', ''))[:64]]
if req.POST.get('contact_twitter'):
blob['twitter'] = [str(req.POST.get('contact_twitter', ''))[:32]]
if req.POST.get('contact_facebook'):
blob['facebook'] = [str(req.POST.get('contact_facebook', ''))[:256]]
if req.POST.get('contact_instagram'):
blob['instagram'] = [str(req.POST.get('contact_instagram', ''))[:32]]
if req.POST.get('contact_twitch'):
blob['twitch'] = [str(req.POST.get('contact_twitch', ''))[:32]]
if req.POST.get('contact_youtube'):
blob['youtube'] = [str(req.POST.get('contact_youtube', ''))[:32]]
return json.dumps(blob)
|
|
import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp,
)
from .topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {
key: self.deep_deconstruct(value)
for key, value in obj.items()
}
elif isinstance(obj, functools.partial):
return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, 'deconstruct'):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
else:
return obj
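    # For example, models.CharField(max_length=100) deep-deconstructs to roughly
    # ('django.db.models.CharField', [], {'max_length': 100}); the field name
    # returned by Field.deconstruct() is deliberately dropped above.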
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# We'll then go through that list later and order it and split
# into migrations to resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
self.altered_indexes = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = []
self.old_proxy_keys = []
self.old_unmanaged_keys = []
self.new_model_keys = []
self.new_proxy_keys = []
self.new_unmanaged_keys = []
for al, mn in sorted(self.from_state.models.keys()):
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.append((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.append((al, mn))
else:
self.old_model_keys.append((al, mn))
for al, mn in sorted(self.to_state.models.keys()):
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.append((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.append((al, mn))
else:
self.new_model_keys.append((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Create the altered indexes and store them in self.altered_indexes.
# This avoids the same computation in generate_removed_indexes()
# and generate_added_indexes().
self.create_altered_indexes()
# Generate index removal operations before field is removed
self.generate_removed_indexes()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_added_indexes()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists, and prepare a list of the fields that used
through models in the old state so we can make dependencies
from the through model deletion to the field that uses it.
"""
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
def _generate_through_model_map(self):
"""
Through model map generation
"""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
not old_field.remote_field.through._meta.auto_created):
through_key = (
old_field.remote_field.through._meta.app_label,
old_field.remote_field.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
def _build_migration_list(self, graph=None):
"""
We need to chop the lists of operations up into migrations with
dependencies on each other. We do this by stepping up an app's list of
operations until we find one that has an outgoing dependency that isn't
in another app's migration yet (hasn't been chopped off its list). We
then chop off the operations before it into a migration and move onto
the next app. If we loop back around without doing anything, there's a
circular dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations.keys()):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
is_swappable_dep = False
if dep[0] == "__setting__":
# We need to temporarily resolve the swappable dependency to prevent
# circular references. While keeping the dependency checks on the
# resolved model we still add the swappable dependencies.
# See #23322
resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
original_dep = dep
dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
is_swappable_dep = True
if dep[0] != app_label and dep[0] != "__setting__":
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
self.generated_operations[app_label] = self.generated_operations[app_label][1:]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. The order we have already isn't bad,
but we need to pull a few things around so FKs work nicely inside the
same app
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Finds any renamed models, and generates the operations for them,
and removes the old entry from the model lists.
Must be run before other model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = set(self.new_model_keys) - set(self.old_model_keys)
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = set(self.old_model_keys) - set(self.new_model_keys)
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
)
)
self.renamed_models[app_label, model_name] = rem_model_name
renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
model_state.app_label,
model_state.name,
)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.append((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also defer any model options that refer to collections of fields
that might be deferred (e.g. unique_together, index_together).
"""
old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
added_models = set(self.new_model_keys) - old_keys
added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Are there indexes/unique|index_together to defer?
indexes = model_state.options.pop('indexes')
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
dependencies = self._get_dependencies_for_foreign_key(field)
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
# Generate other opns
related_dependencies = [
(app_label, model_name, name, True)
for name, field in sorted(related_fields.items())
]
related_dependencies.append((app_label, model_name, None, True))
for index in indexes:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
dependencies=related_dependencies,
)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
# Fix relationships if the model changed from a proxy model to a
# concrete model.
if (app_label, model_name) in self.old_proxy_keys:
for related_object in model_opts.related_objects:
self.add_operation(
related_object.related_model._meta.app_label,
operations.AlterField(
model_name=related_object.related_model._meta.object_name,
name=related_object.field.name,
field=related_object.field,
),
dependencies=[(app_label, model_name, None, True)],
)
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
        We use the same statements so that there's less code duplication, but
        for proxy models we can of course skip all the field handling and just
        emit a single operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
deleted_models = set(self.old_model_keys) - new_keys
deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
if not model._meta.managed:
# Skip here, no need to handle fields for unmanaged models
continue
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.remote_field:
if field.remote_field.model:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name, field in sorted(related_fields.items()):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name, field in sorted(related_fields.items()):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""
Works out renamed fields
"""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
if old_field_dec == field_dec:
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""
Fields that have been added
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
preserve_default = True
time_fields = (models.DateField, models.DateTimeField, models.TimeField)
if (not field.null and not field.has_default() and
not field.many_to_many and
not (field.blank and field.empty_strings_allowed) and
not (isinstance(field, time_fields) and field.auto_now)):
field = field.clone()
if isinstance(field, time_fields) and field.auto_now_add:
field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name)
else:
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
preserve_default = False
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
Fields that have been altered.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
rename_key = (
new_field.remote_field.model._meta.app_label,
new_field.remote_field.model._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None):
rename_key = (
new_field.remote_field.through._meta.app_label,
new_field.remote_field.through._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.through = old_field.remote_field.through
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = old_field.many_to_many and new_field.many_to_many
neither_m2m = not old_field.many_to_many and not new_field.many_to_many
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not new_field.many_to_many):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
)
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
option_name = operations.AddIndex.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_indexes = old_model_state.options[option_name]
new_indexes = new_model_state.options[option_name]
add_idx = [idx for idx in new_indexes if idx not in old_indexes]
rem_idx = [idx for idx in old_indexes if idx not in new_indexes]
self.altered_indexes.update({
(app_label, model_name): {
'added_indexes': add_idx, 'removed_indexes': rem_idx,
}
})
def generate_added_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['added_indexes']:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
)
)
def generate_removed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['removed_indexes']:
self.add_operation(
app_label,
operations.RemoveIndex(
model_name=model_name,
name=index.name,
)
)
def _get_dependencies_for_foreign_key(self, field):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
return dependencies
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
dependencies = []
for foo_togethers in new_value:
for field_name in foo_togethers:
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
),
dependencies=dependencies,
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
).union(
self.kept_unmanaged_keys
).union(
# unmanaged converted to managed
set(self.old_unmanaged_keys).intersection(self.new_model_keys)
).union(
# managed converted to unmanaged
set(self.old_model_keys).intersection(self.new_unmanaged_keys)
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
        but we put some effort into the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % get_migration_name_timestamp()
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
        beginning of it. If no number is found, returns None.
"""
match = re.match(r'^\d+', name)
if match:
return int(match.group())
return None
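    # e.g. parse_number("0002_remove_field") == 2 and parse_number("custom") is None.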
|
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Handling of the <message> element.
'''
import re
import types
from grit.node import base
import grit.format.rc_header
import grit.format.rc
# Needed by ItemFormatter() below for the 'js_map_format' output type.
import grit.format.js_map_format
from grit import clique
from grit import exception
from grit import tclib
from grit import util
# Finds whitespace at the start and end of a string which can be multiline.
_WHITESPACE = re.compile(r'(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z',
                         re.DOTALL | re.MULTILINE)
class MessageNode(base.ContentNode):
'''A <message> element.'''
# For splitting a list of things that can be separated by commas or
# whitespace
  _SPLIT_RE = re.compile(r'\s*,\s*|\s+')
def __init__(self):
super(type(self), self).__init__()
# Valid after EndParsing, this is the MessageClique that contains the
# source message and any translations of it that have been loaded.
self.clique = None
# We don't send leading and trailing whitespace into the translation
# console, but rather tack it onto the source message and any
# translations when formatting them into RC files or what have you.
self.ws_at_start = '' # Any whitespace characters at the start of the text
self.ws_at_end = '' # --"-- at the end of the text
# A list of "shortcut groups" this message is in. We check to make sure
# that shortcut keys (e.g. &J) within each shortcut group are unique.
self.shortcut_groups_ = []
def _IsValidChild(self, child):
return isinstance(child, (PhNode))
def _IsValidAttribute(self, name, value):
if name not in ['name', 'offset', 'translateable', 'desc', 'meaning',
'internal_comment', 'shortcut_groups', 'custom_type',
'validation_expr', 'use_name_for_id']:
return False
if name == 'translateable' and value not in ['true', 'false']:
return False
return True
def MandatoryAttributes(self):
return ['name|offset']
def DefaultAttributes(self):
return {
'translateable' : 'true',
'desc' : '',
'meaning' : '',
'internal_comment' : '',
'shortcut_groups' : '',
'custom_type' : '',
'validation_expr' : '',
'use_name_for_id' : 'false',
}
def GetTextualIds(self):
'''
    Returns the concatenation of the parent node's first_id and
    this node's offset if it has one; otherwise just calls the
    superclass' implementation.
'''
if 'offset' in self.attrs:
# we search for the first grouping node in the parents' list
# to take care of the case where the first parent is an <if> node
grouping_parent = self.parent
import grit.node.empty
while grouping_parent and not isinstance(grouping_parent,
grit.node.empty.GroupingNode):
grouping_parent = grouping_parent.parent
assert 'first_id' in grouping_parent.attrs
return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']]
else:
return super(type(self), self).GetTextualIds()
def IsTranslateable(self):
return self.attrs['translateable'] == 'true'
def ItemFormatter(self, t):
# Only generate an output if the if condition is satisfied.
if not self.SatisfiesOutputCondition():
return super(type(self), self).ItemFormatter(t)
if t == 'rc_header':
return grit.format.rc_header.Item()
elif t in ('rc_all', 'rc_translateable', 'rc_nontranslateable'):
return grit.format.rc.Message()
elif t == 'js_map_format':
return grit.format.js_map_format.Message()
else:
return super(type(self), self).ItemFormatter(t)
def EndParsing(self):
super(type(self), self).EndParsing()
# Make the text (including placeholder references) and list of placeholders,
# then strip and store leading and trailing whitespace and create the
# tclib.Message() and a clique to contain it.
text = ''
placeholders = []
for item in self.mixed_content:
if isinstance(item, types.StringTypes):
text += item
else:
presentation = item.attrs['name'].upper()
text += presentation
ex = ' '
if len(item.children):
ex = item.children[0].GetCdata()
original = item.GetCdata()
placeholders.append(tclib.Placeholder(presentation, original, ex))
m = _WHITESPACE.match(text)
if m:
self.ws_at_start = m.group('start')
self.ws_at_end = m.group('end')
text = m.group('body')
self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups'])
self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != '']
description_or_id = self.attrs['desc']
if description_or_id == '' and 'name' in self.attrs:
description_or_id = 'ID: %s' % self.attrs['name']
assigned_id = None
if self.attrs['use_name_for_id'] == 'true':
assigned_id = self.attrs['name']
message = tclib.Message(text=text, placeholders=placeholders,
description=description_or_id,
meaning=self.attrs['meaning'],
assigned_id=assigned_id)
self.clique = self.UberClique().MakeClique(message, self.IsTranslateable())
for group in self.shortcut_groups_:
self.clique.AddToShortcutGroup(group)
if self.attrs['custom_type'] != '':
self.clique.SetCustomType(util.NewClassInstance(self.attrs['custom_type'],
clique.CustomType))
elif self.attrs['validation_expr'] != '':
self.clique.SetCustomType(
clique.OneOffCustomType(self.attrs['validation_expr']))
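  # For illustration (assumed .grd input): a message such as
  #   <message name="IDS_GREETING">Hello <ph name="username">%s<ex>Joi</ex></ph></message>
  # ends up as tclib text "Hello USERNAME" with Placeholder("USERNAME", "%s", "Joi"),
  # and any leading/trailing whitespace is moved into ws_at_start/ws_at_end.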
def GetCliques(self):
if self.clique:
return [self.clique]
else:
return []
def Translate(self, lang):
'''Returns a translated version of this message.
'''
assert self.clique
msg = self.clique.MessageForLanguage(lang,
self.PseudoIsAllowed(),
self.ShouldFallbackToEnglish()
).GetRealContent()
return msg.replace('[GRITLANGCODE]', lang)
def NameOrOffset(self):
if 'name' in self.attrs:
return self.attrs['name']
else:
return self.attrs['offset']
def GetDataPackPair(self, output_dir, lang):
    '''Returns an (id, string) pair that represents the string id and the string
in utf8. This is used to generate the data pack data file.
'''
from grit.format import rc_header
id_map = rc_header.Item.tids_
id = id_map[self.GetTextualIds()[0]]
message = self.ws_at_start + self.Translate(lang) + self.ws_at_end
if "\\n" in message:
# Windows automatically translates \n to a new line, but GTK+ doesn't.
# Manually do the conversion here rather than at run time.
message = message.replace("\\n", "\n")
# |message| is a python unicode string, so convert to a utf16 byte stream
# because that's the format of datapacks. We skip the first 2 bytes
# because it is the BOM.
return id, message.encode('utf16')[2:]
# static method
def Construct(parent, message, name, desc='', meaning='', translateable=True):
'''Constructs a new message node that is a child of 'parent', with the
name, desc, meaning and translateable attributes set using the same-named
parameters and the text of the message and any placeholders taken from
'message', which must be a tclib.Message() object.'''
# Convert type to appropriate string
if translateable:
translateable = 'true'
else:
translateable = 'false'
node = MessageNode()
node.StartParsing('message', parent)
node.HandleAttribute('name', name)
node.HandleAttribute('desc', desc)
node.HandleAttribute('meaning', meaning)
node.HandleAttribute('translateable', translateable)
items = message.GetContent()
for ix in range(len(items)):
if isinstance(items[ix], types.StringTypes):
text = items[ix]
# Ensure whitespace at front and back of message is correctly handled.
if ix == 0:
text = "'''" + text
if ix == len(items) - 1:
text = text + "'''"
node.AppendContent(text)
else:
phnode = PhNode()
phnode.StartParsing('ph', node)
phnode.HandleAttribute('name', items[ix].GetPresentation())
phnode.AppendContent(items[ix].GetOriginal())
if len(items[ix].GetExample()) and items[ix].GetExample() != ' ':
exnode = ExNode()
exnode.StartParsing('ex', phnode)
exnode.AppendContent(items[ix].GetExample())
exnode.EndParsing()
phnode.AddChild(exnode)
phnode.EndParsing()
node.AddChild(phnode)
node.EndParsing()
return node
Construct = staticmethod(Construct)
class PhNode(base.ContentNode):
'''A <ph> element.'''
def _IsValidChild(self, child):
return isinstance(child, ExNode)
def MandatoryAttributes(self):
return ['name']
def EndParsing(self):
super(type(self), self).EndParsing()
# We only allow a single example for each placeholder
if len(self.children) > 1:
raise exception.TooManyExamples()
class ExNode(base.ContentNode):
'''An <ex> element.'''
pass
|
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/docstrings_checker."""
from __future__ import annotations
import ast
import contextlib
import unittest
from . import docstrings_checker # isort:skip
import astroid # isort:skip
from pylint.checkers import utils # isort:skip
class ASTDocstringsCheckerTest(unittest.TestCase):
"""Class for testing the docstrings_checker script."""
def test_build_regex_from_args_one_arg(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
args = ['arg_name0']
expected_result = r'(Args:)[\S\s]*(arg_name0:)'
result = docstring_checker.build_regex_from_args(args)
self.assertEqual(result, expected_result)
def test_build_regex_from_args_multiple_args(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
args = ['arg_name0', 'arg_name1']
expected_result = r'(Args:)[\S\s]*(arg_name0:)[\S\s]*(arg_name1:)'
result = docstring_checker.build_regex_from_args(args)
self.assertEqual(result, expected_result)
def test_build_regex_from_args_empty_list_returns_none(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
args = []
expected_result = None
result = docstring_checker.build_regex_from_args(args)
self.assertEqual(result, expected_result)
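    # Editorial aside (not part of the original tests): the regex built above is
    # intended for re.search against a docstring, e.g. (hypothetical values):
    #
    #   import re
    #   pattern = r'(Args:)[\S\s]*(arg_name0:)[\S\s]*(arg_name1:)'
    #   docstring = 'Args:\n    arg_name0: first.\n    arg_name1: second.'
    #   assert re.search(pattern, docstring) is not None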
def test_compare_arg_order_one_matching_arg_returns_empty_list(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name']
docstring_args = """Description
Args:
arg_name: description
"""
expected_result = []
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_no_colon_returns_error(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name']
docstring_args = """Description
Args:
arg_name
"""
expected_result = ['Arg not followed by colon: arg_name']
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_two_matching_ordered_args_success(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1', 'arg_name2']
docstring_args = """Description
Args:
arg_name1: description,
arg_name2: description
"""
expected_result = []
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_empty_docstring_exits_without_error(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1']
docstring_args = ''
expected_result = []
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_no_arg_header_exits_without_error(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1']
docstring_args = 'I only have a description.'
expected_result = []
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_missing_arg_returns_arg(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1', 'arg_name2']
docstring_args = """Description
Args:
arg_name1: description
"""
expected_result = ['Arg missing from docstring: arg_name2']
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_missing_first_arg_returns_one_error(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1', 'arg_name2']
docstring_args = """Description
Args:
arg_name2: description
"""
expected_result = ['Arg missing from docstring: arg_name1']
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_misordered_args_returns_one_error(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1', 'arg_name2']
docstring_args = """Description
Args:
arg_name2: description
arg_name1: description
"""
expected_result = ['Arg ordering error in docstring.']
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_mention_arg_without_colon_has_no_effect(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1', 'arg_name2']
docstring_args = """Description
Args:
arg_name1: description involving arg_name2,
arg_name2: description involving arg_name1
"""
expected_result = []
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_arg_substring_not_confused(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['this_has_a_substring', 'intermediate_arg', 'substring']
docstring_args = """Description
Args:
this_has_a_substring: description,
intermediate_arg: description,
substring: description
"""
expected_result = []
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_compare_arg_order_multi_line_descriptions_success(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
func_args = ['arg_name1', 'arg_name2']
docstring_args = """Description
Args:
arg_name1: description that goes on for a
long time.
arg_name2: description
"""
expected_result = []
result = docstring_checker.compare_arg_order(func_args, docstring_args)
self.assertEqual(result, expected_result)
def test_space_indentation(self):
sample_string = ' This is a sample string.'
self.assertEqual(docstrings_checker.space_indentation(sample_string), 5)
def test_check_docstrings_arg_order(self):
docstring_checker = docstrings_checker.ASTDocStringChecker()
ast_file = ast.walk(ast.parse(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result"""))
func_defs = [n for n in ast_file if isinstance(n, ast.FunctionDef)]
self.assertEqual(len(func_defs), 1)
func_result = docstring_checker.check_docstrings_arg_order(func_defs[0])
self.assertEqual(func_result, [])
def test_possible_exc_types_with_inference_error(self):
@contextlib.contextmanager
def swap(obj, attr, newvalue):
"""Swap an object's attribute value within the context of a
'with' statement. The object can be anything that supports
getattr and setattr, such as class instances, modules, etc.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
raise_node = astroid.extract_node(
"""
def func():
raise Exception('An exception.') #@
""")
node_ignores_exception_swap = swap(
utils, 'node_ignores_exception',
lambda _, __: (_ for _ in ()).throw(astroid.InferenceError()))
with node_ignores_exception_swap:
exceptions = docstrings_checker.possible_exc_types(raise_node)
self.assertEqual(exceptions, set([]))
def test_possible_exc_types_with_exception_message(self):
raise_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test raising exceptions.\"\"\"
raise Exception('An exception.') #@
""")
exceptions = docstrings_checker.possible_exc_types(raise_node)
self.assertEqual(exceptions, set(['Exception']))
def test_possible_exc_types_with_no_exception(self):
raise_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test raising exceptions.\"\"\"
raise #@
""")
exceptions = docstrings_checker.possible_exc_types(raise_node)
self.assertEqual(exceptions, set([]))
def test_possible_exc_types_with_exception_inside_function(self):
raise_node = astroid.extract_node(
"""
def func():
try:
raise Exception('An exception.')
except Exception:
raise #@
""")
exceptions = docstrings_checker.possible_exc_types(raise_node)
self.assertEqual(exceptions, set(['Exception']))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _is_numeric_dtype_enum(datatype_enum):
return (datatype_enum != types_pb2.DT_INVALID and
datatype_enum != types_pb2.DT_RESOURCE and
datatype_enum != types_pb2.DT_RESOURCE_REF)
class TypesTest(test_util.TensorFlowTestCase):
def testAllTypesConstructible(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
self.assertEqual(datatype_enum,
dtypes.DType(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToDType(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
self.assertEqual(datatype_enum,
dtypes.as_dtype(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToNumpyDtype(self):
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
_ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
if dtype.base_dtype != dtypes.bfloat16:
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
self.assertEqual(
dtypes.as_dtype(datatype_enum).base_dtype,
dtypes.as_dtype(numpy_dtype))
def testInvalid(self):
with self.assertRaises(TypeError):
dtypes.DType(types_pb2.DT_INVALID)
with self.assertRaises(TypeError):
dtypes.as_dtype(types_pb2.DT_INVALID)
def testNumpyConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
self.assertIs(dtypes.string, dtypes.as_dtype(np.object))
self.assertIs(dtypes.string,
dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool))
with self.assertRaises(TypeError):
dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))
def testRealDtype(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.bool, dtypes.uint8, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64
]:
self.assertIs(dtype.real_dtype, dtype)
self.assertIs(dtypes.complex64.real_dtype, dtypes.float32)
self.assertIs(dtypes.complex128.real_dtype, dtypes.float64)
def testStringConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype("float32"))
self.assertIs(dtypes.float64, dtypes.as_dtype("float64"))
self.assertIs(dtypes.int32, dtypes.as_dtype("int32"))
self.assertIs(dtypes.uint8, dtypes.as_dtype("uint8"))
self.assertIs(dtypes.uint16, dtypes.as_dtype("uint16"))
self.assertIs(dtypes.int16, dtypes.as_dtype("int16"))
self.assertIs(dtypes.int8, dtypes.as_dtype("int8"))
self.assertIs(dtypes.string, dtypes.as_dtype("string"))
self.assertIs(dtypes.complex64, dtypes.as_dtype("complex64"))
self.assertIs(dtypes.complex128, dtypes.as_dtype("complex128"))
self.assertIs(dtypes.int64, dtypes.as_dtype("int64"))
self.assertIs(dtypes.bool, dtypes.as_dtype("bool"))
self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
self.assertIs(dtypes.quint8, dtypes.as_dtype("quint8"))
self.assertIs(dtypes.qint32, dtypes.as_dtype("qint32"))
self.assertIs(dtypes.bfloat16, dtypes.as_dtype("bfloat16"))
self.assertIs(dtypes.float32_ref, dtypes.as_dtype("float32_ref"))
self.assertIs(dtypes.float64_ref, dtypes.as_dtype("float64_ref"))
self.assertIs(dtypes.int32_ref, dtypes.as_dtype("int32_ref"))
self.assertIs(dtypes.uint8_ref, dtypes.as_dtype("uint8_ref"))
self.assertIs(dtypes.int16_ref, dtypes.as_dtype("int16_ref"))
self.assertIs(dtypes.int8_ref, dtypes.as_dtype("int8_ref"))
self.assertIs(dtypes.string_ref, dtypes.as_dtype("string_ref"))
self.assertIs(dtypes.complex64_ref, dtypes.as_dtype("complex64_ref"))
self.assertIs(dtypes.complex128_ref, dtypes.as_dtype("complex128_ref"))
self.assertIs(dtypes.int64_ref, dtypes.as_dtype("int64_ref"))
self.assertIs(dtypes.bool_ref, dtypes.as_dtype("bool_ref"))
self.assertIs(dtypes.qint8_ref, dtypes.as_dtype("qint8_ref"))
self.assertIs(dtypes.quint8_ref, dtypes.as_dtype("quint8_ref"))
self.assertIs(dtypes.qint32_ref, dtypes.as_dtype("qint32_ref"))
self.assertIs(dtypes.bfloat16_ref, dtypes.as_dtype("bfloat16_ref"))
with self.assertRaises(TypeError):
dtypes.as_dtype("not_a_type")
def testDTypesHaveUniqueNames(self):
dtypez = []
names = set()
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dtype = dtypes.as_dtype(datatype_enum)
dtypez.append(dtype)
names.add(dtype.name)
self.assertEqual(len(dtypez), len(names))
def testIsInteger(self):
self.assertEqual(dtypes.as_dtype("int8").is_integer, True)
self.assertEqual(dtypes.as_dtype("int16").is_integer, True)
self.assertEqual(dtypes.as_dtype("int32").is_integer, True)
self.assertEqual(dtypes.as_dtype("int64").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint8").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint16").is_integer, True)
self.assertEqual(dtypes.as_dtype("complex64").is_integer, False)
self.assertEqual(dtypes.as_dtype("complex128").is_integer, False)
self.assertEqual(dtypes.as_dtype("float").is_integer, False)
self.assertEqual(dtypes.as_dtype("double").is_integer, False)
self.assertEqual(dtypes.as_dtype("string").is_integer, False)
self.assertEqual(dtypes.as_dtype("bool").is_integer, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
def testIsFloating(self):
self.assertEqual(dtypes.as_dtype("int8").is_floating, False)
self.assertEqual(dtypes.as_dtype("int16").is_floating, False)
self.assertEqual(dtypes.as_dtype("int32").is_floating, False)
self.assertEqual(dtypes.as_dtype("int64").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint16").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex64").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex128").is_floating, False)
self.assertEqual(dtypes.as_dtype("float32").is_floating, True)
self.assertEqual(dtypes.as_dtype("float64").is_floating, True)
self.assertEqual(dtypes.as_dtype("string").is_floating, False)
self.assertEqual(dtypes.as_dtype("bool").is_floating, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
def testIsComplex(self):
self.assertEqual(dtypes.as_dtype("int8").is_complex, False)
self.assertEqual(dtypes.as_dtype("int16").is_complex, False)
self.assertEqual(dtypes.as_dtype("int32").is_complex, False)
self.assertEqual(dtypes.as_dtype("int64").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint16").is_complex, False)
self.assertEqual(dtypes.as_dtype("complex64").is_complex, True)
self.assertEqual(dtypes.as_dtype("complex128").is_complex, True)
self.assertEqual(dtypes.as_dtype("float32").is_complex, False)
self.assertEqual(dtypes.as_dtype("float64").is_complex, False)
self.assertEqual(dtypes.as_dtype("string").is_complex, False)
self.assertEqual(dtypes.as_dtype("bool").is_complex, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
def testIsUnsigned(self):
self.assertEqual(dtypes.as_dtype("int8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("uint8").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("uint16").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("float32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("float64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bool").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("string").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex128").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
def testMinMax(self):
# make sure min/max evaluates for all data types that have min/max
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
# ignore types for which there are no minimum/maximum (or we cannot
# compute it, such as for the q* types)
if (dtype.is_quantized or dtype.base_dtype == dtypes.bool or
dtype.base_dtype == dtypes.string or
dtype.base_dtype == dtypes.complex64 or
dtype.base_dtype == dtypes.complex128):
continue
print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
# check some values that are known
if numpy_dtype == np.bool_:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 1)
if numpy_dtype == np.int8:
self.assertEquals(dtype.min, -128)
self.assertEquals(dtype.max, 127)
if numpy_dtype == np.int16:
self.assertEquals(dtype.min, -32768)
self.assertEquals(dtype.max, 32767)
if numpy_dtype == np.int32:
self.assertEquals(dtype.min, -2147483648)
self.assertEquals(dtype.max, 2147483647)
if numpy_dtype == np.int64:
self.assertEquals(dtype.min, -9223372036854775808)
self.assertEquals(dtype.max, 9223372036854775807)
if numpy_dtype == np.uint8:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 255)
if numpy_dtype == np.uint16:
if dtype == dtypes.uint16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 65535)
elif dtype == dtypes.bfloat16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
      if numpy_dtype == np.uint32:
        self.assertEquals(dtype.min, 0)
        self.assertEquals(dtype.max, 4294967295)
      if numpy_dtype == np.uint64:
        self.assertEquals(dtype.min, 0)
        self.assertEquals(dtype.max, 18446744073709551615)
if numpy_dtype in (np.float16, np.float32, np.float64):
self.assertEquals(dtype.min, np.finfo(numpy_dtype).min)
self.assertEquals(dtype.max, np.finfo(numpy_dtype).max)
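  # Editorial aside (not part of the original test): np.finfo exposes the float
  # limits asserted above, e.g. np.finfo(np.float32).max is 3.4028235e+38 and
  # np.finfo(np.float32).min is its negation.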
def testRepr(self):
for enum, name in dtypes._TYPE_TO_STRING.items():
if enum > 100:
continue
dtype = dtypes.DType(enum)
self.assertEquals(repr(dtype), "tf." + name)
import tensorflow as tf
dtype2 = eval(repr(dtype))
self.assertEquals(type(dtype2), dtypes.DType)
self.assertEquals(dtype, dtype2)
def testEqWithNonTFTypes(self):
self.assertNotEqual(dtypes.int32, int)
self.assertNotEqual(dtypes.float64, 2.1)
if __name__ == "__main__":
googletest.main()
|
|
import logging
from apps.atencion.forms.ConsultaForm import ConsultaForm
from apps.atencion.forms.DetalleRecetaForm import DetalleRecetaForm
from apps.atencion.forms.TratamientoForm import TratamientoForm
from apps.atencion.models import Consulta, AntecedenteMedico, DiagnosticoConsulta, Tratamiento, DetalleReceta
log = logging.getLogger(__name__)
from apps.utils.security import SecurityKey, log_params, UserToken, get_dep_objects
from django import http
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import transaction
from django.shortcuts import render, render_to_response, redirect
from django.views.generic import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic.list import ListView
from django.core import serializers
from django.http import HttpResponse
from django.db.models import Max, Sum, Count
from django.contrib import messages
from django.shortcuts import get_list_or_404, get_object_or_404
from datetime import datetime, time, date
from django.contrib.auth import authenticate, login, logout
from django.utils.encoding import force_text
from django.contrib.messages.views import SuccessMessageMixin
from apps.utils.decorators import permission_resource_required
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from apps.utils.forms import empty
import json
from django.utils.text import capfirst, get_text_list
from .forms.PersonaForm import PersonaForm
from .forms.LaboratorioForm import LaboratorioForm
from .forms.ProductoForm import ProductoForm
from .forms.PeriodoForm import PeriodoForm
from .forms.FuncionesVitalesForm import FuncionesVitalesForm
from .forms.UnidadMedidaForm import UnidadMedidaForm
from .forms.HistoriaForm import HistoriaForm
from .forms.DiagnosticoForm import DiagnosticoForm
from .forms.AntecendeMedicoForm import AntecedenteMedicoForm
from .models import (Persona, Producto, Laboratorio, FuncionesVitales,
Periodo, Diagnostico, UnidadMedida, Historia, Departamento, Provincia, Distrito,ReporteAtencion)
# class Persona==============================================================================
class PersonaListView(ListView):
model = Persona
template_name = 'persona/persona_list.html'
paginate_by = settings.PER_PAGE
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(PersonaListView, self).dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
if 'all' in self.request.GET:
return None
return ListView.get_paginate_by(self, queryset)
def get_queryset(self):
self.o = empty(self.request, 'o', '-id')
self.f = empty(self.request, 'f', 'nombres')
self.q = empty(self.request, 'q', '')
column_contains = u'%s__%s' % (self.f, 'contains')
return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)
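    # Editorial illustration (not in the original code): with the defaults above
    # ('o'='-id', 'f'='nombres', 'q'=''), the queryset built here is equivalent to
    #   Persona.objects.filter(nombres__contains='').order_by('-id')
    # while a request such as ?f=dni&q=123&o=nombres would translate to
    #   Persona.objects.filter(dni__contains='123').order_by('nombres')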
def get_context_data(self, **kwargs):
context = super(PersonaListView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'persona'
context['title'] = _('Select %s to change') % capfirst(_('Persona'))
context['o'] = self.o
context['f'] = self.f
context['q'] = self.q.replace('/', '-')
return context
class ProvinciaAjax(TemplateView):
"""docstring for BusquedaAjaxView"""
def get(self, request, *args, **kwargs):
options = '<option value="" selected="selected">---------</option>'
id_departamento = request.GET.get('id')
if id_departamento:
provincias = Provincia.objects.filter(departamento__id=id_departamento)
else:
provincias = Provincia.objects.filter(departamento__id=0)
# data = serializers.serialize('json', distritos, fields=('id', 'distrito'))
for provincia in provincias:
options += '<option value="%s">%s</option>' % (
provincia.pk,
provincia.nombre
)
response = {}
response['provincias'] = options
return http.JsonResponse(response)
class DistritoAjax(TemplateView):
"""docstring for BusquedaAjaxView"""
def get(self, request, *args, **kwargs):
options = '<option value="" selected="selected">---------</option>'
id_provincia = request.GET.get('id')
if id_provincia:
distritos = Distrito.objects.filter(provincia__id=id_provincia)
else:
distritos = Distrito.objects.filter(provincia__id=0)
# data = serializers.serialize('json', distritos, fields=('id', 'distrito'))
for distrito in distritos:
options += '<option value="%s">%s</option>' % (
distrito.pk,
distrito.nombre
)
response = {}
response['distritos'] = options
return http.JsonResponse(response)
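# Editorial illustration (not part of the original code): a GET to the view above
# with ?id=<provincia_id> answers with JSON shaped roughly like (names invented):
#   {"distritos": "<option value=\"\" selected=\"selected\">---------</option>"
#                 "<option value=\"7\">San Jeronimo</option>..."}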
class PersonaCreateView(CreateView):
model = Persona
form_class = PersonaForm
template_name = 'persona/persona_add.html'
success_url = reverse_lazy('atencion:persona_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(PersonaCreateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PersonaCreateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'persona'
context['title'] = ('Agregar %s') % ('Persona')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = (' %(name)s "%(obj)s" fue creado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(PersonaCreateView, self).form_valid(form)
class PersonaUpdateView(UpdateView):
model = Persona
template_name = 'persona/persona_add.html'
form_class = PersonaForm
success_url = reverse_lazy('atencion:persona_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(PersonaUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PersonaUpdateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'persona'
context['title'] = _('Add %s') % _('Persona')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _('%(name)s "%(obj)s" fue cambiado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(PersonaUpdateView, self).form_valid(form)
class PersonaDeleteView(DeleteView):
model = Persona
success_url = reverse_lazy('atencion:persona_list')
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
try:
self.get_object()
except Exception as e:
messages.error(self.request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
return super(PersonaDeleteView, self).dispatch(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
try:
d = self.get_object()
deps, msg = get_dep_objects(d)
print(deps)
if deps:
messages.warning(self.request, ('No se puede Eliminar %(name)s') % {
"name": capfirst(force_text(self.model._meta.verbose_name))
+ ' "' + force_text(d) + '"'
})
raise Exception(msg)
d.delete()
msg = _(' %(name)s "%(obj)s" fue eliminado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(d)
}
if not d.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
except Exception as e:
messages.error(request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
def get(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
class HitoriaBusquedaTemplateView(TemplateView):
"""Historia Template View.
Clase usada para buscar el historial de una persona.
"""
template_name = "historial/busqueda.html"
formulario = HistoriaForm
def get_context_data(self, **kwargs):
context = super(HitoriaBusquedaTemplateView, self).get_context_data(**kwargs)
        context['form'] = self.formulario
        return context
def get(self, request, *args, **kwargs):
codigo = request.GET.get('codigo')
estudiante = None
persona = None
matriculado = None
try:
            personaes = Persona.objects.get(codigo=codigo)  # look up by student code
except Exception as e:
personaes = None
#msg =("La persona no Existe)
try:
personaex = Persona.objects.get(dni=codigo)
except Exception as e:
personaex = None
if personaes:
persona = personaes
if persona.es_matriculado:
matriculado = "Matriculado"
if persona.es_estudiante:
estudiante = "Estudiante"
if personaex:
persona = personaex
if persona.es_estudiante:
estudiante = "Estudiante"
if persona.es_matriculado:
matriculado = 'Matriculado'
try:
historia = Historia.objects.get(persona__id=persona.id)
print(historia)
except Exception as e:
historia = None
context = {'persona': persona, 'historia': historia, 'estudiante': estudiante, 'matriculado': matriculado }
#messages.success(self.request, msg)
#log.warning(msg, extra=log_params(self.request))
return self.render_to_response(context)
class HitoriaCreateView(CreateView):
model = Historia
form_class = HistoriaForm
template_name = 'historial/historia_add.html'
def get_success_url(self):
return reverse('atencion:historia_detail', kwargs={'pk': self.object.pk})
def form_valid(self, form):
self.object = form.save(commit=False)
persona = Persona.objects.get(id=self.request.POST['persona'])
if persona.es_estudiante:
self.object.numero = persona.codigo
else:
self.object.numero = persona.dni
return super(HitoriaCreateView, self).form_valid(form)
class HitoriaDetailView(DetailView):
model = Historia
form_f_vitales = FuncionesVitalesForm
template_name = 'historial/historia_detail.html'
form_consulta = ConsultaForm
form_tratamiento = TratamientoForm
form_antecedente = AntecedenteMedicoForm
form_receta = DetalleRecetaForm
def get_context_data(self, **kwargs):
context = super(HitoriaDetailView, self).get_context_data(**kwargs)
try:
antecedente = AntecedenteMedico.objects.get(historia=self.object)
except Exception as e:
antecedente = None
context['form'] = self.form_f_vitales
context['form_receta'] = self.form_receta
context['form_antecedente'] = self.form_antecedente
context['form_consulta'] = self.form_consulta
context['form_tratamiento'] = self.form_tratamiento
consulta = Consulta.objects.filter(historia=self.object).filter(estado=False).last()
context['consulta'] = consulta
context['antecedente'] = antecedente
try:
context['proceso'] = Consulta.objects.get(estado=True, historia=self.object)
except Exception as e:
context['proceso'] = None
return context
class DiagnosticoConsultaCreate(TemplateView):
def post(self, request):
sid = transaction.savepoint()
try:
proceso = json.loads(request.POST.get('proceso'))
historiaid = proceso['historia']
historia = Historia.objects.get(id=historiaid)
consulta = Consulta.objects.get(historia=historia, estado=True)
consulta.examen_fisico = proceso['examen']
consulta.enfermedad_actual = proceso['enfermedad']
consulta.hecho = True
consulta.estado = False
consulta.save()
tratamiento = Tratamiento()
tratamiento.recomendacion = proceso['recomendacion']
tratamiento.consulta = consulta
tratamiento.save()
for c in proceso['medicamento']:
producto = Producto.objects.get(codigo=c['codigo'])
presentacion = UnidadMedida.objects.get(id=c['presentacion'])
receta = DetalleReceta()
receta.tratamiento = tratamiento
receta.producto = producto
receta.cantidad = c['cantidad']
receta.presentacion = presentacion
receta.dosis = c['dosis']
receta.periodo = c['periodo']
receta.save()
for c in proceso['diagnostico']:
diagonostico = Diagnostico.objects.get(id=c['pkey'])
diag = DiagnosticoConsulta()
diag.diagnostico = diagonostico
diag.consulta = consulta
diag.save()
        except Exception as e:
            # Roll back to the savepoint taken at the top so a half-written consulta is not kept.
            transaction.savepoint_rollback(sid)
            log.warning(force_text(e), extra=log_params(request))
        else:
            transaction.savepoint_commit(sid)
return HttpResponseRedirect(reverse('atencion:historia_detail', kwargs={'pk': historia.pk}))
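# Editorial sketch (not part of the original code): the 'proceso' POST field
# parsed above is expected to be a JSON object along these lines (values invented):
#   {"historia": 12,
#    "examen": "...", "enfermedad": "...", "recomendacion": "...",
#    "medicamento": [{"codigo": "P001", "presentacion": 3, "cantidad": 2,
#                     "dosis": "1 cada 8 horas", "periodo": "5 dias"}],
#    "diagnostico": [{"pkey": 45}]}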
class DiagnosticoBuscar(TemplateView):
def get(self, request, *args, **kwargs):
codigo = request.GET.get('codigo')
diagnostico = Diagnostico.objects.get(codigo=codigo)
data = serializers.serialize('json', [diagnostico,])
return HttpResponse(data, content_type='application/json')
class AntecedenteCreateView(CreateView):
model = AntecedenteMedico
form_class = AntecedenteMedicoForm
def get_success_url(self):
return reverse('atencion:historia_detail', kwargs={'pk': self.object.historia.pk})
def form_valid(self, form):
self.object = form.save(commit=False)
historiaid = self.request.POST['historia']
historia = Historia.objects.get(id=historiaid)
self.object.historia = historia
return super(AntecedenteCreateView, self).form_valid(form)
# class Producto==============================================================================
class ProductoListView(ListView):
model = Producto
template_name = 'producto/producto_list.html'
paginate_by = settings.PER_PAGE
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(ProductoListView, self).dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
if 'all' in self.request.GET:
return None
return ListView.get_paginate_by(self, queryset)
def get_queryset(self):
self.o = empty(self.request, 'o', '-id')
self.f = empty(self.request, 'f', 'codigo')
self.q = empty(self.request, 'q', '')
column_contains = u'%s__%s' % (self.f, 'contains')
return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)
def get_context_data(self, **kwargs):
context = super(ProductoListView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'producto'
context['title'] = _('Select %s to change') % capfirst(_('Producto'))
context['o'] = self.o
context['f'] = self.f
context['q'] = self.q.replace('/', '-')
return context
class ProductoBuscarAjaxView(TemplateView):
def get(self, request, *args, **kwargs):
codigo = request.GET.get('codigo')
print('llego hasta el post')
object = Producto.objects.get(codigo=codigo)
print(object)
data = serializers.serialize('json', [object,])
return HttpResponse(data, content_type='application/json')
class ProductoCreateView(CreateView):
model = Producto
form_class = ProductoForm
template_name = 'producto/producto_add.html'
success_url = reverse_lazy('atencion:producto_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(ProductoCreateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ProductoCreateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'producto'
context['title'] = ('Agregar %s') % ('Producto')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _(' %(name)s "%(obj)s" fue creado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(ProductoCreateView, self).form_valid(form)
class ProductoUpdateView(UpdateView):
model = Producto
template_name = 'producto/producto_add.html'
form_class = ProductoForm
success_url = reverse_lazy('atencion:producto_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(ProductoUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ProductoUpdateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'producto'
context['title'] = _('Add %s') % _('Producto')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _('%(name)s "%(obj)s" fue cambiado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(ProductoUpdateView, self).form_valid(form)
class ProductoDeleteView(DeleteView):
model = Producto
success_url = reverse_lazy('atencion:producto_list')
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
try:
self.get_object()
except Exception as e:
messages.error(self.request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
return super(ProductoDeleteView, self).dispatch(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
try:
d = self.get_object()
deps, msg = get_dep_objects(d)
print(deps)
if deps:
messages.warning(self.request, ('No se puede Eliminar %(name)s') % {
"name": capfirst(force_text(self.model._meta.verbose_name))
+ ' "' + force_text(d) + '"'
})
raise Exception(msg)
d.delete()
msg = _(' %(name)s "%(obj)s" fue eliminado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(d)
}
if not d.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
except Exception as e:
messages.error(request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
def get(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
# class Laboratorio==============================================================================
class LaboratorioListView(ListView):
model = Laboratorio
template_name = 'laboratorio/laboratorio_list.html'
paginate_by = settings.PER_PAGE
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(LaboratorioListView, self).dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
if 'all' in self.request.GET:
return None
return ListView.get_paginate_by(self, queryset)
def get_queryset(self):
self.o = empty(self.request, 'o', '-id')
self.f = empty(self.request, 'f', 'hemoglobina')
self.q = empty(self.request, 'q', '')
column_contains = u'%s__%s' % (self.f, 'contains')
return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)
def get_context_data(self, **kwargs):
context = super(LaboratorioListView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'laboratorio'
context['title'] = _('Select %s to change') % capfirst(_('Laboratorio'))
context['o'] = self.o
context['f'] = self.f
context['q'] = self.q.replace('/', '-')
return context
class LaboratorioCreateView(CreateView):
model = Laboratorio
form_class = LaboratorioForm
template_name = 'laboratorio/laboratorio_add.html'
success_url = reverse_lazy('atencion:laboratorio_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(LaboratorioCreateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LaboratorioCreateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'laboratorio'
context['title'] = ('Agregar %s') % ('Laboratorio')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _(' %(name)s "%(obj)s" fue creado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(LaboratorioCreateView, self).form_valid(form)
class LaboratorioUpdateView(UpdateView):
model = Laboratorio
template_name = 'laboratorio/laboratorio_add.html'
form_class = LaboratorioForm
success_url = reverse_lazy('atencion:laboratorio_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(LaboratorioUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LaboratorioUpdateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'laboratorio'
context['title'] = _('Add %s') % _('Laboratorio')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _('%(name)s "%(obj)s" fue cambiado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(LaboratorioUpdateView, self).form_valid(form)
class LaboratorioDeleteView(DeleteView):
model = Laboratorio
success_url = reverse_lazy('atencion:laboratorio_list')
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
try:
self.get_object()
except Exception as e:
messages.error(self.request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
return super(LaboratorioDeleteView, self).dispatch(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
try:
d = self.get_object()
deps, msg = get_dep_objects(d)
print(deps)
if deps:
messages.warning(self.request, ('No se puede Eliminar %(name)s') % {
"name": capfirst(force_text(self.model._meta.verbose_name))
+ ' "' + force_text(d) + '"'
})
raise Exception(msg)
d.delete()
msg = _(' %(name)s "%(obj)s" fuel eliminado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(d)
}
if not d.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
except Exception as e:
messages.error(request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
def get(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
# class FuncionesVitales==============================================================================
class FuncionesVitalesListView(ListView):
model = FuncionesVitales
template_name = 'funciones_vitales/funcionesvitales_list.html'
paginate_by = settings.PER_PAGE
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(FuncionesVitalesListView, self).dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
if 'all' in self.request.GET:
return None
return ListView.get_paginate_by(self, queryset)
def get_queryset(self):
self.o = empty(self.request, 'o', '-id')
self.f = empty(self.request, 'f', 'peso')
self.q = empty(self.request, 'q', '')
column_contains = u'%s__%s' % (self.f, 'contains')
return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)
def get_context_data(self, **kwargs):
context = super(FuncionesVitalesListView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'funcionesvitales'
context['title'] = _('Select %s to change') % capfirst(_('FuncionesVitales'))
context['o'] = self.o
context['f'] = self.f
context['q'] = self.q.replace('/', '-')
return context
class FuncionesVitalesCreateView(CreateView):
model = FuncionesVitales
form_class = FuncionesVitalesForm
template_name = 'funciones_vitales/funcionesvitales_add.html'
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(FuncionesVitalesCreateView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
return reverse('atencion:historia_detail', kwargs={'pk': self.object.consulta.historia.pk})
def form_valid(self, form):
self.object = form.save(commit=False)
historiaid = self.request.POST['historia']
historia = Historia.objects.get(id=historiaid)
consulta = Consulta()
consulta.historia = historia
consulta.save()
self.object.consulta = consulta
return super(FuncionesVitalesCreateView, self).form_valid(form)
class FuncionesVitalesUpdateView(UpdateView):
model = FuncionesVitales
template_name = 'funciones_vitales/funcionesvitales_add.html'
form_class = FuncionesVitalesForm
success_url = reverse_lazy('atencion:funcionesvitales_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(FuncionesVitalesUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(FuncionesVitalesUpdateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'funcionesvitales'
context['title'] = _('Add %s') % _('FuncionesVitales')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _('%(name)s "%(obj)s" fue cambiado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(FuncionesVitalesUpdateView, self).form_valid(form)
class FuncionesVitalesDeleteView(DeleteView):
model = FuncionesVitales
success_url = reverse_lazy('atencion:funcionesvitales_list')
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
try:
self.get_object()
except Exception as e:
messages.error(self.request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
return super(FuncionesVitalesDeleteView, self).dispatch(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
try:
d = self.get_object()
deps, msg = get_dep_objects(d)
print(deps)
if deps:
messages.warning(self.request, ('No se puede Eliminar %(name)s') % {
"name": capfirst(force_text(self.model._meta.verbose_name))
+ ' "' + force_text(d) + '"'
})
raise Exception(msg)
d.delete()
msg = _(' %(name)s "%(obj)s" fuel eliminado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(d)
}
if not d.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
except Exception as e:
messages.error(request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
def get(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
# class Periodo==============================================================================
class PeriodoListView(ListView):
model = Periodo
template_name = 'periodo/periodo_list.html'
paginate_by = settings.PER_PAGE
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(PeriodoListView, self).dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
if 'all' in self.request.GET:
return None
return ListView.get_paginate_by(self, queryset)
def get_queryset(self):
self.o = empty(self.request, 'o', '-id')
self.f = empty(self.request, 'f', 'ciclo')
self.q = empty(self.request, 'q', '')
column_contains = u'%s__%s' % (self.f, 'contains')
return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)
def get_context_data(self, **kwargs):
context = super(PeriodoListView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'periodo'
context['title'] = _('Select %s to change') % capfirst(_('Periodo'))
context['o'] = self.o
context['f'] = self.f
context['q'] = self.q.replace('/', '-')
return context
class PeriodoCreateView(CreateView):
model = Periodo
form_class = PeriodoForm
template_name = 'periodo/periodo_add.html'
success_url = reverse_lazy('atencion:periodo_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(PeriodoCreateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PeriodoCreateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'periodo'
context['title'] = ('Agregar %s') % ('Periodo')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _(' %(name)s "%(obj)s" fue creado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(PeriodoCreateView, self).form_valid(form)
class PeriodoUpdateView(UpdateView):
model = Periodo
template_name = 'periodo/periodo_add.html'
form_class = PeriodoForm
success_url = reverse_lazy('atencion:periodo_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(PeriodoUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PeriodoUpdateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'periodo'
context['title'] = _('Add %s') % _('Periodo')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _('%(name)s "%(obj)s" fue cambiado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(PeriodoUpdateView, self).form_valid(form)
class PeriodoDeleteView(DeleteView):
model = Periodo
success_url = reverse_lazy('atencion:periodo_list')
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
try:
self.get_object()
except Exception as e:
messages.error(self.request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
return super(PeriodoDeleteView, self).dispatch(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
try:
d = self.get_object()
deps, msg = get_dep_objects(d)
print(deps)
if deps:
messages.warning(self.request, ('No se puede Eliminar %(name)s') % {
"name": capfirst(force_text(self.model._meta.verbose_name))
+ ' "' + force_text(d) + '"'
})
raise Exception(msg)
d.delete()
msg = _(' %(name)s "%(obj)s" fue eliminado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(d)
}
if not d.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
except Exception as e:
messages.error(request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
def get(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
# class Diagnostico==============================================================================
class DiagnosticoListView(ListView):
model = Diagnostico
template_name = 'diagnostico/diagnostico_list.html'
paginate_by = settings.PER_PAGE
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(DiagnosticoListView, self).dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
if 'all' in self.request.GET:
return None
return ListView.get_paginate_by(self, queryset)
def get_queryset(self):
self.o = empty(self.request, 'o', '-id')
self.f = empty(self.request, 'f', 'codigo')
self.q = empty(self.request, 'q', '')
column_contains = u'%s__%s' % (self.f, 'contains')
return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)
def get_context_data(self, **kwargs):
context = super(DiagnosticoListView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
        context['cmi'] = 'diagnostico'
context['title'] = _('Select %s to change') % capfirst(_('Diagnostico'))
context['o'] = self.o
context['f'] = self.f
context['q'] = self.q.replace('/', '-')
return context
class DiagnosticoCreateView(CreateView):
model = Diagnostico
form_class = DiagnosticoForm
template_name = 'diagnostico/diagnostico_add.html'
success_url = reverse_lazy('atencion:diagnostico_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(DiagnosticoCreateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(DiagnosticoCreateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'diagnostico'
context['title'] = ('Agregar %s') % ('Diagnostico')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _(' %(name)s "%(obj)s" fue creado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(DiagnosticoCreateView, self).form_valid(form)
class DiagnosticoUpdateView(UpdateView):
model = Diagnostico
template_name = 'diagnostico/diagnostico_add.html'
form_class = DiagnosticoForm
success_url = reverse_lazy('atencion:diagnostico_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(DiagnosticoUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(DiagnosticoUpdateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'diagnostico'
context['title'] = _('Add %s') % _('Diagnostico')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _('%(name)s "%(obj)s" fue cambiado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(DiagnosticoUpdateView, self).form_valid(form)
class DiagnosticoDeleteView(DeleteView):
model = Diagnostico
    success_url = reverse_lazy('atencion:diagnostico_list')
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
try:
self.get_object()
except Exception as e:
messages.error(self.request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
return super(DiagnosticoDeleteView, self).dispatch(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
try:
d = self.get_object()
deps, msg = get_dep_objects(d)
print(deps)
if deps:
messages.warning(self.request, ('No se puede Eliminar %(name)s') % {
"name": capfirst(force_text(self.model._meta.verbose_name))
+ ' "' + force_text(d) + '"'
})
raise Exception(msg)
d.delete()
msg = _(' %(name)s "%(obj)s" fue eliminado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(d)
}
if not d.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
except Exception as e:
messages.error(request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
def get(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
# UnidadMedida==============================================================================
class UnidadMedidaListView(ListView):
model = UnidadMedida
template_name = 'unidad_medida/unidadmedida_list.html'
paginate_by = settings.PER_PAGE
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(UnidadMedidaListView, self).dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
if 'all' in self.request.GET:
return None
return ListView.get_paginate_by(self, queryset)
def get_queryset(self):
self.o = empty(self.request, 'o', '-id')
self.f = empty(self.request, 'f', 'codigo')
self.q = empty(self.request, 'q', '')
column_contains = u'%s__%s' % (self.f, 'contains')
return self.model.objects.filter(**{column_contains: self.q}).order_by(self.o)
def get_context_data(self, **kwargs):
context = super(UnidadMedidaListView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'unidadmedida'
context['title'] = _('Select %s to change') % capfirst(_('UnidadMedida'))
context['o'] = self.o
context['f'] = self.f
context['q'] = self.q.replace('/', '-')
return context
class UnidadMedidaCreateView(CreateView):
model = UnidadMedida
form_class = UnidadMedidaForm
template_name = 'unidad_medida/unidadmedida_add.html'
success_url = reverse_lazy('atencion:unidadmedida_list')
@method_decorator(permission_resource_required )
def dispatch(self, request, *args, **kwargs):
return super(UnidadMedidaCreateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(UnidadMedidaCreateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'unidadmedida'
        context['title'] = _('Add %s') % _('UnidadMedida')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _(' %(name)s "%(obj)s" fue creado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(UnidadMedidaCreateView, self).form_valid(form)
class UnidadMedidaUpdateView(UpdateView):
model = UnidadMedida
template_name = 'unidad_medida/unidadmedida_add.html'
form_class = UnidadMedidaForm
success_url = reverse_lazy('atencion:unidadmedida_list')
    @method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
return super(UnidadMedidaUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(UnidadMedidaUpdateView, self).get_context_data(**kwargs)
context['opts'] = self.model._meta
context['cmi'] = 'unidadmedida'
        context['title'] = _('Change %s') % _('UnidadMedida')
return context
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.usuario = self.request.user
msg = _('%(name)s "%(obj)s" fue cambiado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(self.object)
}
if self.object.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
return super(UnidadMedidaUpdateView, self).form_valid(form)
class UnidadMedidaDeleteView(DeleteView):
model = UnidadMedida
    success_url = reverse_lazy('atencion:unidadmedida_list')
@method_decorator(permission_resource_required)
def dispatch(self, request, *args, **kwargs):
try:
self.get_object()
except Exception as e:
messages.error(self.request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
return super(UnidadMedidaDeleteView, self).dispatch(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
try:
d = self.get_object()
deps, msg = get_dep_objects(d)
print(deps)
if deps:
messages.warning(self.request, ('No se puede Eliminar %(name)s') % {
"name": capfirst(force_text(self.model._meta.verbose_name))
+ ' "' + force_text(d) + '"'
})
raise Exception(msg)
d.delete()
msg = _(' %(name)s "%(obj)s" fue eliminado satisfactoriamente.') % {
'name': capfirst(force_text(self.model._meta.verbose_name)),
'obj': force_text(d)
}
if not d.id:
messages.success(self.request, msg)
log.warning(msg, extra=log_params(self.request))
except Exception as e:
messages.error(request, e)
log.warning(force_text(e), extra=log_params(self.request))
return HttpResponseRedirect(self.success_url)
def get(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
# class reportes==============================================================================
from highcharts.views import HighChartsLineView, HighChartsBarView
class BarView(HighChartsLineView):
    categories = [1, 2, 3]
    @property
    def series(self):
        # Group consultations by day and count them (limited to three data points).
        consultas = (Consulta.objects.extra({'atencion': "date(fecha)"})
                     .values('atencion').annotate(count=Count('id'))[:3])
        data = []
        names = []
        for consulta in consultas:
            data.append(consulta['count'])
            names.append(consulta['atencion'])
        # Build the series once all points are collected; appending inside the
        # loop would add the same (shared) lists to the result repeatedly.
        return [{'name': names, 'data': data}]
def vista(request):
return render(request, 'reportes/atencion.html')
|
|
from __future__ import annotations
import procrunner
import pytest
import iotbx.mtz
from dxtbx.serialize import load
import xia2.Test.regression
def split_xinfo(data_dir, tmpdir):
split_xinfo_template = """/
BEGIN PROJECT AUTOMATIC
BEGIN CRYSTAL DEFAULT
BEGIN WAVELENGTH NATIVE
WAVELENGTH 0.979500
END WAVELENGTH NATIVE
BEGIN SWEEP SWEEP1
WAVELENGTH NATIVE
DIRECTORY {0}
IMAGE X4_wide_M1S4_2_0001.cbf
START_END 1 40
BEAM 219.84 212.65
END SWEEP SWEEP1
BEGIN SWEEP SWEEP2
WAVELENGTH NATIVE
DIRECTORY {0}
IMAGE X4_wide_M1S4_2_0001.cbf
START_END 45 90
BEAM 219.84 212.65
END SWEEP SWEEP2
END CRYSTAL DEFAULT
END PROJECT AUTOMATIC
"""
xinfo_file = tmpdir / "split.xinfo"
xinfo_file.write(
split_xinfo_template.format(data_dir.strpath.replace("\\", "\\\\"))
)
return xinfo_file.strpath
@pytest.mark.parametrize("pipeline,scaler", (("dials", "xdsa"), ("3dii", "dials")))
def test_incompatible_pipeline_scaler(pipeline, scaler, tmpdir, ccp4):
command_line = ["xia2", "pipeline=%s" % pipeline, "nproc=1", "scaler=%s" % scaler]
result = procrunner.run(command_line, working_directory=tmpdir)
assert result.returncode
assert "Error: scaler=%s not compatible with pipeline=%s" % (
scaler,
pipeline,
) in result.stdout.decode("latin-1")
def test_dials_aimless(regression_test, dials_data, tmpdir, ccp4):
command_line = [
"xia2",
"pipeline=dials-aimless",
"nproc=1",
"trust_beam_centre=True",
"read_all_image_headers=False",
"truncate=cctbx",
dials_data("x4wide").strpath,
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide.dials-aimless", result, tmpdir, ccp4, expected_space_group="P41212"
)
assert success, issues
def test_dials_aimless_with_dials_pipeline(regression_test, dials_data, tmpdir, ccp4):
# This should be functionally equivalent to 'test_dials_aimless' above
command_line = [
"xia2",
"pipeline=dials",
"scaler=ccp4a",
"nproc=1",
"trust_beam_centre=True",
"read_all_image_headers=False",
"truncate=cctbx",
dials_data("x4wide").strpath,
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide.dials-aimless", result, tmpdir, ccp4
)
assert success, issues
def test_dials(regression_test, dials_data, tmpdir, ccp4):
command_line = [
"xia2",
"pipeline=dials",
"nproc=1",
"trust_beam_centre=True",
"read_all_image_headers=False",
"truncate=cctbx",
"free_total=1000",
"project=foo",
"crystal=bar",
dials_data("x4wide").strpath,
]
result = procrunner.run(command_line, working_directory=tmpdir)
scaled_expt_file = tmpdir / "DataFiles" / "foo_bar_scaled.expt"
assert scaled_expt_file.check(file=1)
scaled_expt = load.experiment_list(scaled_expt_file.strpath)
for crystal in scaled_expt.crystals():
assert crystal.get_recalculated_unit_cell() is not None
assert len(crystal.get_recalculated_cell_parameter_sd()) == 6
assert crystal.get_recalculated_cell_volume_sd() > 0
for mtz_file in (
"foo_bar_scaled.mtz",
"foo_bar_free.mtz",
"foo_bar_scaled_unmerged.mtz",
):
mtz_obj = iotbx.mtz.object(tmpdir.join("DataFiles").join(mtz_file).strpath)
assert mtz_obj.crystals()[1].project_name() == "foo"
assert mtz_obj.crystals()[1].name() == "bar"
for ma in mtz_obj.as_miller_arrays():
assert ma.unit_cell().parameters() == pytest.approx(
scaled_expt[0].crystal.get_recalculated_unit_cell().parameters(),
abs=1e-4,
)
success, issues = xia2.Test.regression.check_result(
"X4_wide.dials",
result,
tmpdir,
ccp4,
expected_data_files=[
"foo_bar_scaled.mtz",
"foo_bar_scaled_unmerged.mtz",
"foo_bar_scaled.sca",
"foo_bar_scaled_unmerged.sca",
],
expected_space_group="P41212",
)
assert success, issues
def test_dials_aimless_split(regression_test, dials_data, tmpdir, ccp4):
command_line = [
"xia2",
"pipeline=dials-aimless",
"nproc=1",
"njob=2",
"mode=parallel",
"trust_beam_centre=True",
"xinfo=%s" % split_xinfo(dials_data("x4wide"), tmpdir),
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide_split.dials-aimless", result, tmpdir, ccp4
)
assert success, issues
def test_dials_split(regression_test, dials_data, tmpdir, ccp4):
command_line = [
"xia2",
"pipeline=dials",
"nproc=1",
"njob=2",
"trust_beam_centre=True",
"xinfo=%s" % split_xinfo(dials_data("x4wide"), tmpdir),
"mode=parallel",
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide_split.dials",
result,
tmpdir,
ccp4,
expected_data_files=[
"AUTOMATIC_DEFAULT_scaled.mtz",
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz",
],
)
assert success, issues
def test_xds(regression_test, dials_data, tmpdir, ccp4, xds):
command_line = [
"xia2",
"pipeline=3di",
"nproc=1",
"trust_beam_centre=True",
"read_all_image_headers=False",
dials_data("x4wide").strpath,
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide.xds", result, tmpdir, ccp4, xds, expected_space_group="P41212"
)
assert success, issues
def test_xds_split(regression_test, dials_data, tmpdir, ccp4, xds):
command_line = [
"xia2",
"pipeline=3di",
"nproc=1",
"njob=2",
"mode=parallel",
"trust_beam_centre=True",
"xinfo=%s" % split_xinfo(dials_data("x4wide"), tmpdir),
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide_split.xds", result, tmpdir, ccp4, xds
)
assert success, issues
def test_xds_ccp4a(regression_test, dials_data, tmpdir, ccp4, xds):
command_line = [
"xia2",
"pipeline=3di",
"nproc=1",
"scaler=ccp4a",
"trust_beam_centre=True",
dials_data("x4wide").strpath,
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide.ccp4a", result, tmpdir, ccp4, xds
)
assert success, issues
def test_xds_ccp4a_split(regression_test, dials_data, tmpdir, ccp4, xds):
command_line = [
"xia2",
"pipeline=3di",
"nproc=1",
"scaler=ccp4a",
"njob=2",
"merging_statistics.source=aimless",
"trust_beam_centre=True",
"mode=parallel",
"xinfo=%s" % split_xinfo(dials_data("x4wide"), tmpdir),
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide_split.ccp4a", result, tmpdir, ccp4, xds
)
assert success, issues
@pytest.mark.parametrize("space_group", ("P41212", "P422"))
@pytest.mark.parametrize("pipeline", ("dials", "dials-aimless"))
def test_space_group_dials(
pipeline, space_group, regression_test, dials_data, tmpdir, ccp4
):
command_line = [
"xia2",
"pipeline=%s" % pipeline,
"space_group=%s" % space_group,
"nproc=1",
"trust_beam_centre=True",
"read_all_image_headers=False",
"truncate=cctbx",
"free_total=1000",
"image=%s" % dials_data("x4wide").join("X4_wide_M1S4_2_0001.cbf:20:30"),
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide.space_group.%s" % pipeline,
result,
tmpdir,
ccp4,
expected_space_group=space_group,
)
assert success, issues
@pytest.mark.parametrize("space_group", ("P41212", "P422"))
def test_space_group_3dii(space_group, regression_test, dials_data, tmpdir, ccp4, xds):
command_line = [
"xia2",
"pipeline=3dii",
"space_group=%s" % space_group,
"nproc=1",
"trust_beam_centre=True",
"read_all_image_headers=False",
"truncate=cctbx",
"free_total=1000",
"image=%s" % dials_data("x4wide").join("X4_wide_M1S4_2_0001.cbf:20:30"),
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
"X4_wide.space_group.3dii",
result,
tmpdir,
ccp4,
xds=xds,
expected_space_group=space_group,
)
assert success, issues
|
|
import json, os, os.path
from bsddb3 import db
from Catalog.Schema import DBSchema, DBSchemaEncoder, DBSchemaDecoder
from Catalog.Identifiers import FileId, PageId, TupleId
class IndexManager:
"""
An index manager class.
This provides indexes on top of the heap files in our storage layer with BerkeleyDB.
Each index object is a BerkeleyDB database object whose values are tuple identifiers
objects. In this way, each index is an unclustered index that must perform a random
I/O operation to subsequently retrieve the page and tuple from our storage layer.
The index manager class provides facilities to create and remove both primary and
    secondary indexes. Keys for a primary index must be unique, while secondary indexes
may optionally specify unique or non-unique keys. A relation can have at most one
primary index.
The index manager maintains two internal data structures: relationIndexes and indexMap.
The latter is a dictionary mapping an index id to a BerkeleyDB object.
The former is a dictionary mapping a relation name to a triple of relation schema,
primary index id and key schema, and a dictionary of secondary index ids by
their key schema. Index ids are returned on index construction and must be used
to retrieve the index object.
Indexes provide both scan and lookup operations, as well as modifications.
Index maintenance is performed through the insertTuple, deleteTuple and updateTuple
methods. These are invoked on indexes by the file manager when modifying the
    underlying storage file, to ensure the indexes are kept consistent.
These methods ensure that all indexes (both primary and secondaries) are maintained.
In a similar fashion to the file manager, the index manager checkpoints its
internal data structures to disk.
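    As an informal illustration (not a doctest): after creating a primary index on
    ``id`` and a secondary index on ``age`` for an ``employee`` relation, the
    internal state looks roughly like:
        relationIndexes = {'employee': (<employee schema>,
                                        (<id key schema>, 1),
                                        {<age key schema>: 2})}
        indexMap        = {1: <BDB object for 'employee_idx1'>,
                           2: <BDB object for 'employee_idx2'>}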
>>> im = IndexManager()
## Test low-level BDB database operations
# Test index creation
>>> indexDb = im.createIndexDB('test.db')
>>> indexDb.get_dbname()
('test.db', None)
# Test index close and reopen
>>> im.closeIndexDB(indexDb)
>>> indexDb2 = im.openIndexDB('test.db')
>>> indexDb2.get_dbname()
('test.db', None)
# Test index removal
>>> im.removeIndexDB(indexDb2)
## Test index operations
>>> schema = DBSchema('employee', [('id', 'int'), ('age', 'int'), ('salary', 'double')])
>>> keySchema = DBSchema('employeeKey', [('id', 'int')])
>>> ageSchema = DBSchema('employeeAge', [('age', 'int')])
# Test index addition
>>> indexId1 = im.createIndex(schema.name, schema, keySchema, True)
>>> indexId2 = im.createIndex(schema.name, schema, ageSchema, False)
>>> im.indexes(schema.name) # doctest:+ELLIPSIS
[(..., True, 1), (..., False, 2)]
>>> im.hasIndex(schema.name, keySchema)
True
>>> im.hasIndex(schema.name, ageSchema)
True
# Test index retrieval
>>> im.getIndex(indexId1).get_dbname()
('employee_idx1', None)
>>> im.getIndex(indexId2).get_dbname()
('employee_idx2', None)
# Test index matching
>>> im.matchIndex(schema.name, DBSchema('foo', [('age', 'int')]))
2
## Data operations: test data insertion/deletion/lookup on all indexes
# Insert a tuple
>>> pageId = PageId(FileId(0), 1)
>>> e1Id = TupleId(pageId, 1000)
>>> e1Data = schema.pack(schema.instantiate(1, 25, 100000))
>>> im.insertTuple(schema.name, e1Data, e1Id)
# Look up that tuple in both indexes
>>> idx1Key = schema.projectBinary(e1Data, keySchema)
>>> [(tId.pageId.pageIndex, tId.tupleIndex) \
for tId in im.lookupByIndex(indexId1, idx1Key)]
[(1, 1000)]
>>> idx2Key = schema.projectBinary(e1Data, ageSchema)
>>> [(tId.pageId.pageIndex, tId.tupleIndex) \
for tId in im.lookupByIndex(indexId2, idx2Key)]
[(1, 1000)]
# Update the tuple contents, changing the age field.
# This should cause no change in the primary index (by employee id),
# but should invalidate the secondary index entry (based on age).
>>> e1NewDataNewKey = schema.pack(schema.instantiate(1, 30, 90000))
>>> im.updateTuple(schema.name, e1Data, e1NewDataNewKey, e1Id)
# Look up the old tuple in both indexes
>>> idx1Key = schema.projectBinary(e1Data, keySchema)
>>> [(tId.pageId.pageIndex, tId.tupleIndex) \
for tId in im.lookupByIndex(indexId1, idx1Key)]
[(1, 1000)]
>>> idx2Key = schema.projectBinary(e1Data, ageSchema)
>>> list(im.lookupByIndex(indexId2, idx2Key))
[]
# Look up the new tuple in both indexes
>>> idx1Key = schema.projectBinary(e1NewDataNewKey, keySchema)
>>> [(tId.pageId.pageIndex, tId.tupleIndex) \
for tId in im.lookupByIndex(indexId1, idx1Key)]
[(1, 1000)]
>>> idx2Key = schema.projectBinary(e1NewDataNewKey, ageSchema)
>>> [(tId.pageId.pageIndex, tId.tupleIndex) \
for tId in im.lookupByIndex(indexId2, idx2Key)]
[(1, 1000)]
# Delete an indexed tuple
>>> im.deleteTuple(schema.name, e1NewDataNewKey, e1Id)
# Ensure that the lookup returns no tuples.
>>> idx1Key = schema.projectBinary(e1NewDataNewKey, keySchema)
>>> list(im.lookupByIndex(indexId1, idx1Key))
[]
>>> idx2Key = schema.projectBinary(e1NewDataNewKey, ageSchema)
>>> list(im.lookupByIndex(indexId2, idx2Key))
[]
## Index scan tests
# Add many tuples.
>>> testTuples = []
>>> for i in range(10):
... dataIdPair = (schema.pack(schema.instantiate(i, 2*i+20, 5000*(10+i))), TupleId(pageId, i))
... testTuples.append(dataIdPair)
...
>>> for (tup, tupId) in testTuples:
... _ = im.insertTuple(schema.name, tup, tupId)
...
# Scan by both indexes, ensuring they are sorted on their search key.
>>> [keySchema.unpack(k).id for (k,_) in im.scanByIndex(indexId1)] # doctest:+ELLIPSIS
[0, 1, 2, ..., 9]
>>> [ageSchema.unpack(k).age for (k,_) in im.scanByIndex(indexId2)] # doctest:+ELLIPSIS
[20, 22, 24, ..., 38]
# Test index removal
>>> im.removeIndex(schema.name, indexId1)
>>> im.indexes(schema.name) # doctest:+ELLIPSIS
[(..., False, 2)]
>>> im.removeIndex(schema.name, indexId2)
>>> im.indexes(schema.name) # doctest:+ELLIPSIS
[]
"""
defaultIndexDir = "data/index"
checkpointEncoding = "latin1"
checkpointFile = "db.im"
def __init__(self, **kwargs):
other = kwargs.get("other", None)
if other:
self.fromOther(other)
else:
self.indexDir = kwargs.get("indexDir", IndexManager.defaultIndexDir)
checkpointFound = os.path.exists(os.path.join(self.indexDir, IndexManager.checkpointFile))
restoring = "restore" in kwargs
if not os.path.exists(self.indexDir):
os.makedirs(self.indexDir)
if restoring or not checkpointFound:
self.indexCounter = kwargs.get("indexCounter", 0)
self.relationIndexes = kwargs.get("relationIndexes", {}) # rel id -> (relation schema, primary, dict(secondaries))
self.indexMap = kwargs.get("indexMap", {}) # index id -> DB object
self.initializeDB(self.indexDir)
if restoring:
# Initialize relationIndexes and indexMap from restore data.
for i in kwargs["restore"][0]:
self.relationIndexes[i[0]] = (i[1][0], i[1][1], dict(i[1][2]))
for i in kwargs["restore"][1]:
self.indexMap[i[0]] = self.openIndexDB(i[1])
else:
self.restore()
def fromOther(self, other):
self.indexDir = other.indexDir
self.indexCounter = other.indexCounter
self.relationIndexes = other.relationIndexes
self.indexMap = other.indexMap
self.env = other.env
# Save the index manager internals to the data directory.
def checkpoint(self):
imPath = os.path.join(self.indexDir, IndexManager.checkpointFile)
with open(imPath, 'w', encoding=IndexManager.checkpointEncoding) as f:
f.write(self.pack())
# Load indexes from an existing data directory.
def restore(self):
imPath = os.path.join(self.indexDir, IndexManager.checkpointFile)
with open(imPath, 'r', encoding=IndexManager.checkpointEncoding) as f:
other = IndexManager.unpack(f.read())
self.fromOther(other)
# Berkeley DB utility methods.
# Initializes a new BerkeleyDB environment and database to store a set of indexes.
def initializeDB(self, dbDir):
self.env = db.DBEnv()
envFlags = db.DB_CREATE | db.DB_INIT_MPOOL
self.env.open(dbDir, envFlags)
# TODO: set duplicate flags for secondary indexes as needed.
def createIndexDB(self, filename):
indexDb = db.DB(dbEnv=self.env)
dbFlags = db.DB_CREATE | db.DB_TRUNCATE
indexDb.open(filename, db.DB_BTREE, dbFlags)
return indexDb
def openIndexDB(self, filename):
indexDb = db.DB(dbEnv=self.env)
indexDb.open(filename, db.DB_BTREE)
return indexDb
def closeIndexDB(self, indexDb):
indexDb.close()
def removeIndexDB(self, indexDb):
filename, _ = indexDb.get_dbname()
self.closeIndexDB(indexDb)
self.env.dbremove(filename)
# Index identifier methods.
def indexFileName(self, relId, indexId):
return relId+"_idx"+str(indexId)
# Generates a filename for the index.
def generateIndexFileName(self, relId):
self.indexCounter += 1
return (self.indexCounter, self.indexFileName(relId, self.indexCounter))
# Index management methods
# Returns whether the relation has any indexes initialized.
def hasIndexes(self, relId):
return relId in self.relationIndexes and self.relationIndexes[relId]
# Returns the indexes available on a relation as a triple of (schema, primary, index id)
def indexes(self, relId):
if self.hasIndexes(relId):
_, primary, secondaries = self.relationIndexes[relId]
firstElem = [(primary[0], True, primary[1])] if primary else []
return firstElem + list(map(lambda x: (x[0], False, x[1]), secondaries.items()))
return []
# Returns whether an index on the given key exists for a relation.
# Key schemas are treated as lists rather than sets, that is a schema
# with the same ordering of attributes must exist in the IndexManager.
def hasIndex(self, relId, keySchema):
if self.hasIndexes(relId):
_, primary, secondaries = self.relationIndexes[relId]
return (primary is not None and primary[0] == keySchema) or keySchema in secondaries
return False
def checkDuplicateIndex(self, relId, keySchema, primary):
errorMsg = None
if self.hasIndexes(relId):
_, prim, _ = self.relationIndexes[relId]
if primary and prim is not None:
errorMsg = "Invalid construction of a duplicate primary index"
elif self.hasIndex(relId, keySchema):
errorMsg = "Invalid construction of a duplicate index"
return errorMsg
# Creates a new index for the given key as a BDB database.
# Returns the index id of a newly created index from key -> relation
# If the index is indicated to be a primary index, the values are tuple identifiers,
# while for secondary indexes, the values are sets of tuple identifiers.
# This method should ensure that no relation has two primary indexes.
def createIndex(self, relId, relSchema, keySchema, primary):
# Check if this is a duplicate index and abort.
errorMsg = self.checkDuplicateIndex(relId, keySchema, primary)
if errorMsg:
raise ValueError(errorMsg)
indexId, indexFile = self.generateIndexFileName(relId)
indexDb = self.createIndexDB(indexFile)
self.indexMap[indexId] = indexDb
# Add the new index to the relationFiles data structure.
if primary:
schema, _, secondaries = \
self.relationIndexes[relId] if self.hasIndexes(relId) else (relSchema, None, {})
self.relationIndexes[relId] = (schema, (keySchema, indexId), secondaries)
else:
if not self.hasIndexes(relId):
self.relationIndexes[relId] = (relSchema, None, {})
self.relationIndexes[relId][2][keySchema] = indexId
self.checkpoint()
return indexId
# Adds a pre-existing BDB index to the database.
def addIndex(self, relId, relSchema, keySchema, primary, indexId, indexDb):
if indexId not in self.indexMap:
# Check if this is a duplicate index and abort.
errorMsg = self.checkDuplicateIndex(relId, keySchema, primary)
if errorMsg:
raise ValueError(errorMsg)
self.indexCounter = max(self.indexCounter, indexId+1)
self.indexMap[indexId] = indexDb
# Add the new index to the relationFiles data structure.
if primary:
schema, _, secondaries = \
self.relationIndexes[relId] if self.hasIndexes(relId) else (relSchema, None, {})
self.relationIndexes[relId] = (schema, (keySchema, indexId), secondaries)
else:
if not self.hasIndexes(relId):
self.relationIndexes[relId] = (relSchema, None, {})
self.relationIndexes[relId][2][keySchema] = indexId
self.checkpoint()
# Returns the index (i.e., BDB database object) corresponding to the index id.
def getIndex(self, indexId):
if indexId in self.indexMap:
return self.indexMap[indexId]
# Removes or detaches the index (i.e., BDB database object) for the given relation.
def removeIndex(self, relId, indexId, detach=False):
if self.hasIndexes(relId):
schema, primary, secondaries = self.relationIndexes[relId]
if primary and primary[1] == indexId:
self.relationIndexes[relId] = (schema, None, secondaries)
else:
self.relationIndexes[relId] = \
(schema, primary, dict(filter(lambda x: x[1] != indexId, secondaries.items())))
# Clean up relationIndexes entries when no primary or secondary is present.
if self.relationIndexes[relId][1] is None and not self.relationIndexes[relId][2]:
del self.relationIndexes[relId]
if indexId in self.indexMap:
indexDb = self.indexMap.pop(indexId, None)
if indexDb and detach:
self.closeIndexDB(indexDb)
elif indexDb:
self.removeIndexDB(indexDb)
self.checkpoint()
# Returns the index id of the best matching index
# For now, this requires an exact match on the schema fields and types, but not the name.
def matchIndex(self, relId, keySchema):
indexes = self.indexes(relId)
if indexes:
return next((x[2] for x in indexes if keySchema.match(x[0])), None)
# Auxiliary index helpers.
def hasPrimaryIndex(self, relId):
return self.hasIndexes(relId) and self.relationIndexes[relId][1] is not None
    def getPrimaryIndex(self, relId):
        if self.hasPrimaryIndex(relId):
            _, primary, _ = self.relationIndexes[relId]
            # primary is a (key schema, index id) pair; getIndex expects the index id.
            return self.getIndex(primary[1])
# Index access methods.
# Updates all indexes on the relation to add the new tuple.
# The key for each index should be extracted from the full tuple given in tupleData.
def insertTuple(self, relId, tupleData, tupleId):
if self.hasIndexes(relId):
schema, _, _ = self.relationIndexes[relId]
indexes = self.indexes(relId)
if indexes:
for (keySchema, primary, indexId) in indexes:
indexDb = self.getIndex(indexId)
if indexDb is not None:
indexKey = schema.projectBinary(tupleData, keySchema)
putFlags = db.DB_NOOVERWRITE if primary else 0
indexDb.put(indexKey, tupleId.pack(), flags=putFlags)
# Updates all indexes on the relation to remove the given tuple.
# The key for each index should be extracted from the full tuple given in tupleData.
def deleteTuple(self, relId, tupleData, tupleId):
if self.hasIndexes(relId):
schema, _, _ = self.relationIndexes[relId]
indexes = self.indexes(relId)
if indexes:
for (keySchema, primary, indexId) in indexes:
indexDb = self.getIndex(indexId)
if indexDb is not None:
indexKey = schema.projectBinary(tupleData, keySchema)
if primary:
indexDb.delete(indexKey)
else:
# Delete only the tuple matching the given tuple id.
crsr = indexDb.cursor()
found = crsr.get_both(indexKey, tupleId.pack())
if found:
crsr.delete()
crsr.close()
# Updates all indexes on the relation to refresh the given tuple.
# The old and new keys for each index should be extracted from the full tuples.
# For each index, based on whether the key is changing, this method should issue
# the appropriate DB delete+insert calls.
# Note: since our storage engine uses heap files only, the tuple id itself should not change.
def updateTuple(self, relId, oldData, newData, tupleId):
if self.hasIndexes(relId):
schema, _, _ = self.relationIndexes[relId]
indexes = self.indexes(relId)
if indexes:
for (keySchema, primary, indexId) in indexes:
indexDb = self.getIndex(indexId)
if indexDb is not None:
oldKey = schema.projectBinary(oldData, keySchema)
newKey = schema.projectBinary(newData, keySchema)
# If the keys are the same, we do not need to perform any operations.
# That is, we assume the tuple id argument is the same as the existing
# entry (since this is a tuple id), and we do not check this.
if oldKey == newKey:
pass
# Insert a new index entry if the key has changed.
else:
if primary:
indexDb.delete(oldKey)
indexDb.put(newKey, tupleId.pack(), flags=db.DB_NOOVERWRITE)
else:
# Update only the tuple matching the given tuple id.
crsr = indexDb.cursor()
found = crsr.get_both(oldKey, tupleId.pack())
if found:
crsr.delete()
crsr.put(newKey, tupleId.pack(), flags=db.DB_KEYLAST)
# TODO: flags based on whether the secondary index is unique?
crsr.close()
# Lookup methods.
# Perform an index lookup for the given key.
# This returns an iterator over tuple ids.
# TODO: this materializes all tuple ids.
# TODO: Use an iterator abstraction to avoid this.
def lookupByIndex(self, indexId, keyData):
result = []
indexDb = self.getIndex(indexId)
if indexDb is not None:
crsr = indexDb.cursor()
data = crsr.set(keyData)
while data and data[0] == keyData:
result.append(TupleId.unpack(data[1]))
data = crsr.next()
crsr.close()
return iter(result)
# Retrieve a tuple based on its key.
# This method returns None if the relation does not have a primary index,
# or if the key does not exist in the index.
# Otherwise it returns a single tuple identifier.
def lookupByKey(self, relId, keyData):
indexDb = self.getPrimaryIndex(relId)
if indexDb:
return TupleId.unpack(indexDb.get(keyData))
# Index scan operations.
# These return an ordered iterator of (key, tuple id) pairs
# Scan over a specific index.
def scanByIndex(self, indexId):
indexDb = self.getIndex(indexId)
if indexDb is not None:
return iter(indexDb.items())
# Scan over the primary index for a relation.
def scanByKey(self, relId):
indexDb = self.getPrimaryIndex(relId)
if indexDb is not None:
return iter(indexDb.items())
# Index manager serialization
def packSchema(self, schema):
return (schema.name, schema.schema())
def pack(self):
if self.relationIndexes is not None and self.indexMap is not None:
# Convert secondaries dictionary to a list since it has an object as a key type (incompatible w/ JSON)
pRelIndexes = list(map(lambda x: (x[0], (x[1][0], x[1][1], list(x[1][2].items()))), self.relationIndexes.items()))
pIndexMap = list(map(lambda entry: (entry[0], entry[1].get_dbname()), self.indexMap.items()))
return json.dumps((self.indexDir, self.indexCounter, pRelIndexes, pIndexMap), cls=DBSchemaEncoder)
@classmethod
def unpack(cls, buffer):
args = json.loads(buffer, cls=DBSchemaDecoder)
if len(args) == 4:
return cls(indexDir=args[0], indexCounter=args[1], restore=(args[2], args[3]))
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
"""Asyncio backports for Python 3.4.3 compatibility."""
import concurrent.futures
import threading
import logging
from asyncio import coroutines
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
import asyncio
from asyncio import ensure_future
from typing import Any, Union, Coroutine, Callable, Generator, TypeVar, \
Awaitable
_LOGGER = logging.getLogger(__name__)
try:
# pylint: disable=invalid-name
asyncio_run = asyncio.run # type: ignore
except AttributeError:
_T = TypeVar('_T')
def asyncio_run(main: Awaitable[_T], *, debug: bool = False) -> _T:
"""Minimal re-implementation of asyncio.run (since 3.7)."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(debug)
try:
return loop.run_until_complete(main)
finally:
asyncio.set_event_loop(None)
loop.close()
def _set_result_unless_cancelled(fut: Future, result: Any) -> None:
"""Set the result only if the Future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(
concurr: concurrent.futures.Future,
source: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurr.cancel()
if not concurr.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurr.set_exception(exception)
else:
result = source.result()
concurr.set_result(result)
def _copy_future_state(source: Union[concurrent.futures.Future, Future],
dest: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _chain_future(
source: Union[concurrent.futures.Future, Future],
destination: Union[concurrent.futures.Future, Future]) -> None:
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isinstance(source, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for source argument')
if not isinstance(destination, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for destination argument')
# pylint: disable=protected-access
if isinstance(source, Future):
source_loop = source._loop # type: ignore
else:
source_loop = None
if isinstance(destination, Future):
dest_loop = destination._loop # type: ignore
else:
dest_loop = None
def _set_state(future: Union[concurrent.futures.Future, Future],
other: Union[concurrent.futures.Future, Future]) -> None:
if isinstance(future, Future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(
destination: Union[concurrent.futures.Future, Future]) -> None:
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(
source: Union[concurrent.futures.Future, Future]) -> None:
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
dest_loop.call_soon_threadsafe(_set_state, destination, source)
destination.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(
coro: Union[Coroutine, Generator],
loop: AbstractEventLoop) -> concurrent.futures.Future:
"""Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def callback() -> None:
"""Handle the call to the coroutine."""
try:
_chain_future(ensure_future(coro, loop=loop), future)
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(callback)
return future
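# Illustrative usage (a sketch, not part of the original module): submit a
# coroutine from a worker thread to a loop running elsewhere. ``loop`` and
# ``my_coro`` are hypothetical names.
#
#     future = run_coroutine_threadsafe(my_coro(), loop)
#     result = future.result(timeout=10)  # blocks the calling thread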
def fire_coroutine_threadsafe(coro: Coroutine,
loop: AbstractEventLoop) -> None:
"""Submit a coroutine object to a given event loop.
This method does not provide a way to retrieve the result and
is intended for fire-and-forget use. This reduces the
work involved to fire the function on the loop.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required: %s' % coro)
def callback() -> None:
"""Handle the firing of a coroutine."""
ensure_future(coro, loop=loop)
loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(loop: AbstractEventLoop, callback: Callable,
*args: Any) -> concurrent.futures.Future:
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
return future
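# Illustrative usage (a sketch with hypothetical names): schedule a plain callable
# on the loop's thread from a worker thread and wait for its result.
#
#     future = run_callback_threadsafe(loop, state.update, new_value)
#     future.result()  # re-raises any exception from the callback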
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.datacatalog_v1beta1.types import policytagmanagerserialization
from .base import PolicyTagManagerSerializationTransport, DEFAULT_CLIENT_INFO
from .grpc import PolicyTagManagerSerializationGrpcTransport
class PolicyTagManagerSerializationGrpcAsyncIOTransport(
PolicyTagManagerSerializationTransport
):
"""gRPC AsyncIO backend transport for PolicyTagManagerSerialization.
Policy tag manager serialization API service allows clients
    to manipulate their taxonomies and policy tag data in
    serialized form.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "datacatalog.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "datacatalog.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def import_taxonomies(
self,
) -> Callable[
[policytagmanagerserialization.ImportTaxonomiesRequest],
Awaitable[policytagmanagerserialization.ImportTaxonomiesResponse],
]:
r"""Return a callable for the import taxonomies method over gRPC.
Imports all taxonomies and their policy tags to a
project as new taxonomies.
This method provides a bulk taxonomy / policy tag
creation using nested proto structure.
Returns:
Callable[[~.ImportTaxonomiesRequest],
Awaitable[~.ImportTaxonomiesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_taxonomies" not in self._stubs:
self._stubs["import_taxonomies"] = self.grpc_channel.unary_unary(
"/google.cloud.datacatalog.v1beta1.PolicyTagManagerSerialization/ImportTaxonomies",
request_serializer=policytagmanagerserialization.ImportTaxonomiesRequest.serialize,
response_deserializer=policytagmanagerserialization.ImportTaxonomiesResponse.deserialize,
)
return self._stubs["import_taxonomies"]
@property
def export_taxonomies(
self,
) -> Callable[
[policytagmanagerserialization.ExportTaxonomiesRequest],
Awaitable[policytagmanagerserialization.ExportTaxonomiesResponse],
]:
r"""Return a callable for the export taxonomies method over gRPC.
Exports all taxonomies and their policy tags in a
project.
This method generates SerializedTaxonomy protos with
nested policy tags that can be used as an input for
future ImportTaxonomies calls.
Returns:
Callable[[~.ExportTaxonomiesRequest],
Awaitable[~.ExportTaxonomiesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_taxonomies" not in self._stubs:
self._stubs["export_taxonomies"] = self.grpc_channel.unary_unary(
"/google.cloud.datacatalog.v1beta1.PolicyTagManagerSerialization/ExportTaxonomies",
request_serializer=policytagmanagerserialization.ExportTaxonomiesRequest.serialize,
response_deserializer=policytagmanagerserialization.ExportTaxonomiesResponse.deserialize,
)
return self._stubs["export_taxonomies"]
def close(self):
return self.grpc_channel.close()
__all__ = ("PolicyTagManagerSerializationGrpcAsyncIOTransport",)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
import urllib
import urllib2
from oslo.config import cfg
import six
from heat.openstack.common import fileutils
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file containing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule enforced when requested rule is not found')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
if isinstance(self.default_rule, dict):
raise KeyError(key)
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
"""
def __init__(self, policy_file=None, rules=None, default_rule=None):
self.rules = Rules(rules, default_rule)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
self.default_rule = None
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if not self.policy_path:
self.policy_path = self._get_policy_path()
reloaded, data = fileutils.read_cached_file(self.policy_path,
force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug(_("Rules successfully reloaded"))
def _get_policy_path(self):
"""Locate the policy json data file.
:param policy_file: Custom policy file to locate.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file couldn't
be located.
"""
policy_file = CONF.find_file(self.policy_file)
if policy_file:
return policy_file
raise cfg.ConfigFilesNotFoundError((self.policy_file,))
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
LOG.debug(_("Rule %s will be now enforced") % rule)
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug(_("Rule [%s] doesn't exist") % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
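# Illustrative usage (a sketch with hypothetical rule, target and creds; not part
# of the original module):
#
#     enforcer = Enforcer()
#     allowed = enforcer.enforce('instance:create',
#                                {'project_id': target_project_id},
#                                {'roles': ['admin'], 'project_id': user_project_id})
#     # With do_raise=True, a failed check raises PolicyNotAuthorized (or ``exc``).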
class BaseCheck(object):
"""Abstract base class for Check classes."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
        true value (not necessarily True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
return FalseCheck()
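# Illustrative (not part of the original module) behaviour of _parse_check():
#   _parse_check('!')           -> FalseCheck()
#   _parse_check('@')           -> TrueCheck()
#   _parse_check('role:admin')  -> RoleCheck('role', 'admin'), via the _checks registry
#   _parse_check('malformed')   -> FalseCheck(), since rules without ':' fail closed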
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
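# Illustrative (not part of the original module): the legacy list-of-lists rule
#   [["role:admin"], ["role:member", "project_id:%(project_id)s"]]
# is translated to OrCheck([RoleCheck(...), AndCheck([RoleCheck(...), GenericCheck(...)])]),
# i.e. "admin, OR (member AND matching project_id)".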
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
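# Illustrative (not part of the original module): tokenizing the rule string
#   "(role:admin or role:member) and not rule:deleted"
# yields, in order:
#   ('(', '('), ('check', RoleCheck), ('or', 'or'), ('check', RoleCheck),
#   (')', ')'), ('and', 'and'), ('not', 'not'), ('check', RuleCheck)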
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
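# Illustrative (not part of the original module): a reducer method decorated with
#   @reducer('not', 'check')
# is recorded in ParseState.reducers as (['not', 'check'], method_name); whenever
# the last two tokens on the parse stack are 'not' followed by 'check', that method
# is invoked to replace them with the tokens/values it returns.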
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %r") % rule)
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
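# Illustrative (not part of the original module) uses of parse_rule():
#   parse_rule("role:admin or role:operator")  -> OrCheck over two RoleChecks
#   parse_rule([["role:admin"]])               -> a single RoleCheck (legacy list syntax)
#   parse_rule("")                             -> TrueCheck(), i.e. always allow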
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
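# Illustrative (not part of the original module): registering a custom check type
# with the decorator form of register(); the "ip" kind shown here is hypothetical.
#
#   @register('ip')
#   class IpCheck(Check):
#       def __call__(self, target, creds, enforcer):
#           return creds.get('remote_address') == self.match
#
# After this, policy rules such as "ip:127.0.0.1" dispatch to IpCheck.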
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib.urlencode(data)
f = urllib2.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
try:
match = self.match % target
except KeyError:
# While doing GenericCheck if key not
# present in Target return false
return False
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
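# Illustrative (not part of the original module): with target = {'tenant_id': 't1'}
# and creds = {'tenant': 't1'}, the rule "tenant:%(tenant_id)s" becomes
# GenericCheck('tenant', '%(tenant_id)s'); the match interpolates to 't1' and is
# compared against creds['tenant'], so the check succeeds.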
|
|
'''
Created on 03.06.2011
@author: Sergey Khayrulin
'''
import math, re, pprint
class ParserException(Exception):
'''
User-defined exception raised by the parser.
'''
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
class HelpPoint:
'''
Helper Class
'''
def __init__(self,point, lenght):
self.point = point
self.lenght = lenght
def __eq__(self,p):
if self.point == p.point:
return True
else:
return False
class Result_Point:
'''
Definition of a resulting point:
point - a WrlPoint3D
parentPoint - index of the parent point in the resulting_points collection
cable - int indicating which cable group the point belongs to
isBrunchStart - True when the point is the start of a branch
'''
def __init__(self,point,parentPoint,cable=0,isBrunchStart=False):
self.point = point
self.cable = cable
self.isAxon = False
self.isDendrite = False
self.isNeurite = False
self.parentPoint = parentPoint
self.isBrunchStart = isBrunchStart
def getRoot(self, resulting_points):
'''
Get root point
'''
if self.parentPoint != 0:
return resulting_points[self.parentPoint].getRoot(resulting_points)
else:
if self.parentPoint == 0:
return resulting_points.index(self)
class Faces(dict):
'''
Definition for a collection of faces; inherits from dict.
Faces contains (key => value) pairs where the key is the sorted vertex order of the face.
'''
def __init__(self):
dict.__init__({})
self.faces_with_points = {} # cache by point
def __setitem__(self,key,value):
'''
Set item with key to Faces
'''
if key.__class__ == list:
keystr = self.__transformkey(key)
super(Faces, self).__setitem__(keystr, value)
elif key.__class__ == str:
super(Faces, self).__setitem__(key, value)
self.faces_with_points = {}
def __getitem__(self,key):
'''
Get item with key from Faces
'''
if key.__class__ == list:
keystr = self.__transformkey(key)
return super(Faces, self).__getitem__(keystr)
elif key.__class__ == str:
return super(Faces, self).__getitem__(key)
def __missing__(self,key):
'''
This method run if __getitem__ didn't find any item with key
'''
return None
def __transformkey(self, keyArray):
'''
Transform key from list to string
'''
keyArray = sorted(keyArray)
return " %s, %s, %s, %s,"%(keyArray[0],keyArray[1],keyArray[2],keyArray[3])
def __Regen_Dict(self):
for key in self.keys():
tokens = re.split('[,\s]+', key)
for tok in tokens:
if not tok: continue
p = int(tok)
try:
self.faces_with_points[p].append(self[key])
except KeyError:
self.faces_with_points[p] = [self[key]]
def WithPoints(self, p0, p1):
if not self.faces_with_points:
self.__Regen_Dict()
try:
list0 = self.faces_with_points[p0]
except KeyError:
list0 = []
try:
list1 = self.faces_with_points[p1]
except KeyError:
list1 = []
return list0 + list1
def clean_all(self):
self.faces_with_points = {}
def hasFaceWithSide(self, num_p1, num_p2):
face = self.getFaceWithSide(num_p1, num_p2)
return face is not None
def getFaceWithSide(self, num_p1, num_p2):
'''
Check whether the collection of faces contains a face with the side formed
by the two vertices num_p1 and num_p2; return that face, or None if absent.
'''
if 0:
faceWithpoints = [self[key] for key in filter(lambda k:" %s,"%num_p1 in k and " %s,"%num_p2 in k,self.keys())]
else:
if not self.faces_with_points:
self.__Regen_Dict()
faceWithpoints = []
for p in (num_p1, num_p2):
if self.faces_with_points.has_key(p):
for face in self.faces_with_points[p]:
if num_p1 in face.order and num_p2 in face.order:
faceWithpoints.append(face)
for face in faceWithpoints:
if abs(face.order.index(num_p1) - face.order.index(num_p2)) == 1 or abs(face.order.index(num_p1) - face.order.index(num_p2)) == 3:
return face
return None
eps = 0.0001
class Vector(object):
'''
Simple 3D vector supporting dot product, length, angle between vectors and cross product.
'''
def __init__(self, p1, p2 = None):
'''
Construct either from a 3-tuple/list of coordinates (p1) or as the difference vector p2 - p1 between two points.
'''
if p2 is None:
self.x = p1[0]
self.y = p1[1]
self.z = p1[2]
else:
self.x = p2.x - p1.x
self.y = p2.y - p1.y
self.z = p2.z - p1.z
def __mul__(self, vector):
return self.x * vector.x + self.y * vector.y + self.z * vector.z
#def __div__(self, f):
# return WRLVector((self.x / f, self.y / f, self.z / f))
#def __add__(self, vector):
# return WRLVector((self.x + vector.x, self.y + vector.y, self.z + vector.z))
def length(self):
return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)
def angle_between(self, vector):
l2 = self.length() * vector.length()
if l2 == 0:
return 0.0
try:
return math.acos(self * vector / l2)
except ValueError:
if self * vector / l2 > 1:
return 0.0
print("%s angle_between %s %f" % (self, vector, self*vector))
raise
def perpendicular(self, vector):
return Vector((self.y*vector.z - self.z*vector.y,
self.z*vector.x - self.x*vector.z,
self.x*vector.y - self.y*vector.x))
def __str__(self):
return '[%.3f,%.3f,%.3f]' % (self.x, self.y, self.z)
__repr__ = __str__
def as_tuple(self):
return (self.x, self.y, self.z)
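# Illustrative (not part of the original module) use of Vector:
#   v1 = Vector((1.0, 0.0, 0.0))
#   v2 = Vector((0.0, 1.0, 0.0))
#   v1 * v2               # 0.0, the dot product
#   v1.length()           # 1.0
#   v1.angle_between(v2)  # pi / 2
#   v1.perpendicular(v2)  # the cross product, [0.000,0.000,1.000]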
|
|
#!/usr/bin/python
"""
Example script for bridging the semantic gap from
SATA -> Disk actions -> File system activity
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import os
import sys
import argparse
import time
import multiprocessing
import subprocess
import logging
logger = logging.getLogger(__name__)
# LOPHI
import lophi.globals as G
from lophi.data import DiskSensorPacket
from lophi.capture import CaptureReader
from lophi.dataconsumers.logger import LoPhiLogger
from lophi_semanticgap.disk.sata import SATAInterpreter
from lophi_semanticgap.disk.sata_reconstructor import SATAReconstructor, PhysicalPacket
from lophi_semanticgap.disk.filesystem_reconstructor import SemanticEngineDisk
# Defaults
default_dest_ip = "172.20.1.2"
## ========================================
## Code to do real-time analysis of
## the live cap
##
class Analyze(multiprocessing.Process):
"""
This class will take a dcap file as input and output nicely formatted
data.
"""
def __init__(self,
dcap_filename,
disk_img,
output_queues,
tail_enable=False,
sensor_type=G.MACHINE_TYPES.PHYSICAL
):
"""
Store our input variables for later.
"""
self.dcap_filename = dcap_filename
self.disk_img = disk_img
self.output_queues = output_queues
self.tail_enable = tail_enable
self.sensor_type = sensor_type
self.output_dir = "/".join(dcap_filename.split("/")[:-1])
multiprocessing.Process.__init__(self)
def run(self):
"""
This function will read a raw disk capture and use a scanned disk
image to reconstruct the recorded SATA traffic and output the
semantic output.
"""
# copy our disk image to a temporary working image
self.working_disk_img = os.path.join(self.output_dir, "disk.img.tmp")
print "* Creating temporary working image from disk scan. (%s)"%self.working_disk_img
# Delete, copy, chmod new file
try:
os.unlink(self.working_disk_img)
except OSError:  # the working image may not exist yet
pass
cmd = "cp --sparse=always %s %s" % (self.disk_img, self.working_disk_img)
subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
os.chmod(self.working_disk_img, 0755)
# Set up our semantic bridge
print "* Parsing disk image %s into our semantic engine... (This may take a while)" % self.working_disk_img
semantic_engine = SemanticEngineDisk(self.working_disk_img)
# Start processing our dcap
print "* Processing dcap file %s..." % self.dcap_filename
# SATA Interpreter
sata = SATAInterpreter()
"""
@TODO Extract sector size from PyTSK
"""
sata_reconstructor = SATAReconstructor(sector_size=G.SENSOR_DISK.DEFAULT_SECTOR_SIZE)
# read from the cap file in real time
reader = CaptureReader(self.dcap_filename)
# Tailing or terminating?
reader_iter = reader
if self.tail_enable:
reader_iter = reader.tail()
# Loop over all of the dcap contents
for (timestamp, data) in reader_iter:
if self.sensor_type == G.MACHINE_TYPES.PHYSICAL:
(header, data) = sata.extract_sata_data(data)
# deal with SATA NCQ reordering
disk_sensor_pkts = sata_reconstructor.process_packet(PhysicalPacket(header, data))
else:
disk_sensor_pkts = [DiskSensorPacket(data)]
# Process all of our disk packets
if disk_sensor_pkts:
for dsp in disk_sensor_pkts:
# Skip empty packets
if not dsp:
continue
try:
fs_operations = semantic_engine.get_access(dsp.sector,
dsp.num_sectors,
dsp.disk_operation,
dsp.data)
self.log_output(timestamp, fs_operations)
except:
logging.exception("Encountered error while trying to bridge semantic gap for this disk access.")
def log_output(self, timestamp, fs_operations):
"""
This function will take in a list of actions from
our semantic engine and output it to all of our output queues
@param timestamp: timestamp associated with these actions
@param fs_operations: list of filesystem operations from our
semantic engine
"""
# Anything new happen?
if not fs_operations:
return
# aggregate actions since we do it sector by sector
actions = []
current_action = None
last_sector = 0
for fs_op in fs_operations:
if not current_action:
current_action = fs_op
last_sector = current_action['sector']
continue
# check if this fs_op has the same inode and consecutive sector
if ((fs_op['inode'] == current_action['inode']) and
(fs_op['sector'] == last_sector + 1)):
# aggregate the data and continue
current_action['raw_data'] = current_action['raw_data'] + fs_op['raw_data']
last_sector += 1
#print "Aggregating inode %s, file %s, sector %d" % (str(fs_op['inode']), fs_op['filename'], fs_op['sector'])
else:
# otherwise, add the current_action to our list of actions and
# start a new current_action based on fs_op
actions.append(current_action)
current_action = fs_op
last_sector = current_action['sector']
# make sure that the last current_action got added
actions.append(current_action)
# Setup our output dictionary to pass back to our aggregator
output = {}
# Package for log file
output['MODULE'] = "LOPHI"
#output['MACHINE'] = "JADOCS"
#output['PROFILE'] = "WinXPSP3x86"
#output['PROFILE'] = "Win7"
output['SENSOR'] = "disk"
# Append timestamp
for a in actions:
a["Timestamp"] = timestamp
a["Content"] = ""
# Header and data
# {'sector':sector, 'op':op, 'op_type':op_type, 'inode':mft_record_no, 'filename':filename, 'raw_data':raw_data, 'semantic_data':semantic_data}
#output['HEADER'] = ['Timestamp','Operation','Filename','Content']
output['HEADER'] = ['Timestamp','Operation','Filename','Sector','Inode','Semantic Data']
out_data = []
# Output each action
for action in actions:
filename = ""
original_path = ""
# Debug info
if action['filename'] is None:
logger.debug("NOP")
else:
logger.debug("%s: %s" % (action['op_type'], action['filename']))
semantic_data = None
# Append to output
out_data.append([action['Timestamp'],
action['op_type'],
action['filename'],
action['sector'],
action['inode'],
""
])
if action['semantic_data'] is not None:
for sd in action['semantic_data']:
if len(sd['changes']) > 0:
for change in sd['changes']:
meta_old = str(sd['changes'][change]['old'])
meta_new = str(sd['changes'][change]['new'])
if change == "atime" or change == "mtime" or change == "ctime" or change == "crtime":
meta_old = time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(sd['changes'][change]['old']))
meta_new = time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(sd['changes'][change]['new']))
out_data.append([action['Timestamp'],
"["+change.upper()+" MODIFIED]",
sd['filename'],
action['sector'],
meta_old + " -> " + meta_new
])
output['DATA'] = out_data
for q in self.output_queues:
q.put(output)
for out in out_data:
if out[2] != "unknown":
if isinstance(out[4],dict):
print "%s %s"%(out[1],out[2])
else:
print "%s %s %s"%(out[1],out[2], out[4])
def main(options):
# Setup our log files
dcap_filename = options.dcap_file
# Our output will just be the same filename with a CSV extension
(base_name, ext) = os.path.splitext(dcap_filename)
log_csv_filename = base_name +".csv"
# create csv logger
log_csv_queue = multiprocessing.Queue()
log_csv_writer = LoPhiLogger(log_csv_queue,
filename=log_csv_filename,
filetype="csv")
log_csv_writer.start()
# start real-time analysis
print "* Storing output in %s" % log_csv_filename
analysis_process = Analyze(dcap_filename,
options.disk_img,
[log_csv_queue],
tail_enable=options.tail_enable,
sensor_type=options.sensor_type)
analysis_process.run()
log_csv_writer.terminate()
if __name__ == "__main__":
# Get our machine types
machine_types = {}
for x in G.MACHINE_TYPES.__dict__:
if x != "ASCII" and not x.startswith("_"):
machine_types[x] = G.MACHINE_TYPES.__dict__[x]
# Import our command line parser
parser = argparse.ArgumentParser()
# Capture type
parser.add_argument("-T", "--type", action="store", type=int,
dest="sensor_type", default=0,
help="Type of sensor. %s"%machine_types)
# Scan file
parser.add_argument("-s", "--disk_img", action="store", dest="disk_img",
default=None,
help="Scanned disk image filename. (e.g. disk.img)")
# Directory where we store output
parser.add_argument("-i", "--dcap_file", action="store", dest="dcap_file",
default=None,
help="Filename of a disk capture file. (e.g. lophi_disk_capture.dcap")
# Tail or offline analysis?
parser.add_argument("-t", "--tail", action="store_true", dest="tail_enable",
default=False,
help="Continuing tailing file. (Useful for live analysis)")
# Debug
parser.add_argument("-d", "--debug", action="store_true", help="Enable DEBUG")
# Get arguments
options = parser.parse_args()
# Get our log level
if options.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig()
# Make sure that a disk image was given
if options.disk_img is None:
logger.error("Please specify a disk scan image file.")
parser.print_help()
sys.exit(-1)
elif not os.path.exists(options.disk_img):
logger.error("Disk image file does not exist. (%s)"%options.disk_img)
parser.print_help()
sys.exit(-1)
# Make sure a dcap was given
if options.dcap_file is None:
logger.error("Please specify a disk capture file.")
parser.print_help()
sys.exit(-1)
elif not os.path.exists(options.dcap_file):
logger.error("Disk capture file does not exist. (%s)"%options.dcap_file)
parser.print_help()
sys.exit(-1)
main(options)
|
|
import cPickle
import gzip
import os, sys, errno
import time
import math
import glob
import struct
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
#from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
#from frontend.feature_normalisation_base import FeatureNormBase
from frontend.mean_variance_norm import MeanVarianceNorm
##from frontend.mlpg_fast import MLParameterGenerationFast
from frontend.mlpg import MLParameterGeneration as MLParameterGenerationFast ## osw temp
from io_funcs.binary_io import BinaryIOCollection
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
import configuration
from models.dnn import DNN
from models.ms_dnn import MultiStreamDNN
from models.ms_dnn_gv import MultiStreamDNNGv
from models.sdae import StackedDenoiseAutoEncoder
from models.mdn import MixtureDensityNetwork
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import StringIO
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
def visualize_dnn(dnn):
layer_num = len(dnn.params) / 2 ## including input and output
for i in xrange(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i*2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
### plain DNN case
# def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
# logger = logging.getLogger("dnn_generation")
# logger.debug('Starting dnn_generation')
#
# plotlogger = logging.getLogger("plotting")
#
# dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
#
# # visualize_dnn(dbn)
#
# file_number = len(valid_file_list)
#
# for i in xrange(file_number):
# logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
# fid_lab = open(valid_file_list[i], 'rb')
# features = numpy.fromfile(fid_lab, dtype=numpy.float32)
# fid_lab.close()
# features = features[:(n_ins * (features.size / n_ins))]
# features = features.reshape((-1, n_ins))
# temp_set_x = features.tolist()
# test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
#
# predicted_parameter = dnn_model.parameter_prediction(test_set_x=test_set_x)
# # predicted_parameter = test_out()
#
# ### write to cmp file
# predicted_parameter = numpy.array(predicted_parameter, 'float32')
# temp_parameter = predicted_parameter
# fid = open(out_file_list[i], 'wb')
# predicted_parameter.tofile(fid)
# logger.debug('saved to %s' % out_file_list[i])
# fid.close()
#
### multiple Gaussian components
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, target_mean_vector, target_std_vector, out_dimension_dict, file_extension_dict, vocoder='straight'):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
inf_float = -1.0e+10
plotlogger = logging.getLogger("plotting")
# cfg.gen_wav_features  ## no-op reference; gen_wav_features is set per vocoder below
if vocoder == 'straight':
gen_wav_features = ['mgc', 'lf0', 'bap']
elif vocoder == 'glotthmm':
gen_wav_features = ['F0', 'Gain', 'HNR', 'LSF','LSFsource'] ## TODO: take this from config
else:
sys.exit('unsupported vocoder %s !'%(vocoder))
stream_start_index = {}
dimension_index = 0
for feature_name in out_dimension_dict.keys():
stream_start_index[feature_name] = dimension_index
dimension_index += out_dimension_dict[feature_name]
dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
io_funcs = BinaryIOCollection()
mlpg = MLParameterGenerationFast()
for i in xrange(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
features = features.reshape((-1, n_ins))
frame_number = features.shape[0]
test_set_x = theano.shared(numpy.asarray(features, dtype=theano.config.floatX))
mean_matrix = numpy.tile(target_mean_vector, (features.shape[0], 1))
std_matrix = numpy.tile(target_std_vector, (features.shape[0], 1))
predicted_mix = dnn_model.parameter_prediction_mix(test_set_x = test_set_x)
max_index = numpy.argmax(predicted_mix, axis=1)
temp_predicted_mu = dnn_model.parameter_prediction(test_set_x=test_set_x)
temp_predicted_sigma = dnn_model.parameter_prediction_sigma(test_set_x = test_set_x)
predicted_mu = numpy.zeros((temp_predicted_mu.shape[0], n_outs))
predicted_sigma = numpy.zeros((temp_predicted_sigma.shape[0], n_outs))
for kk in xrange(temp_predicted_mu.shape[0]):
predicted_mu[kk, :] = temp_predicted_mu[kk, max_index[kk]*n_outs:(max_index[kk]+1)*n_outs]
predicted_sigma[kk, :] = temp_predicted_sigma[kk, max_index[kk]*n_outs:(max_index[kk]+1)*n_outs]
# print predicted_mu.shape
# predicted_mu = predicted_mu[aa*n_outs:(aa+1)*n_outs]
predicted_mu = predicted_mu * std_matrix + mean_matrix
predicted_sigma = ((predicted_sigma ** 0.5) * std_matrix ) ** 2
dir_name = os.path.dirname(out_file_list[i])
file_id = os.path.splitext(os.path.basename(out_file_list[i]))[0]
mlpg = MLParameterGenerationFast()
for feature_name in gen_wav_features:
current_features = predicted_mu[:, stream_start_index[feature_name]:stream_start_index[feature_name]+out_dimension_dict[feature_name]]
current_sigma = predicted_sigma[:, stream_start_index[feature_name]:stream_start_index[feature_name]+out_dimension_dict[feature_name]]
gen_features = mlpg.generation(current_features, current_sigma, out_dimension_dict[feature_name]/3)
if feature_name in ['lf0', 'F0']:
if stream_start_index.has_key('vuv'):
vuv_feature = predicted_mu[:, stream_start_index['vuv']:stream_start_index['vuv']+1]
for i in xrange(frame_number):
if vuv_feature[i, 0] < 0.5:
gen_features[i, 0] = inf_float
# print gen_features
new_file_name = os.path.join(dir_name, file_id + file_extension_dict[feature_name])
io_funcs.array_to_binary_file(gen_features, new_file_name)
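# Illustrative (not part of the original script) summary of dnn_generation() above:
# for every frame the most probable mixture component is selected via argmax over
# the predicted mixture weights; that component's mean and variance are
# de-normalised (mu * std + mean, and (sqrt(sigma) * std)^2) and then passed,
# stream by stream, through MLPG to produce the features written to disk, with
# lf0/F0 frames forced to the unvoiced value whenever the vuv prediction is below 0.5.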
## generate bottleneck layer as features
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in xrange(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def main_function(cfg, in_dir, out_dir):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layers_sizes = cfg.hyper_params['hidden_layers_sizes']
synth_utts = glob.glob(in_dir + '/*.utt')
file_id_list = []
for fname in synth_utts:
junk,name = os.path.split(fname)
file_id_list.append(name.replace('.utt',''))
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
###total file number including training, development, and testing
#total_file_number = len(file_id_list)
data_dir = cfg.data_dir
#nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
#nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(out_dir, 'gen')
#in_file_list_dict = {}
#for feature_name in cfg.in_dir_dict.keys():
# in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
#nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
#nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
# currently supporting two different forms of linguistic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
# the number can be removed
binary_label_dir = os.path.join(out_dir, 'lab_bin')
nn_label_norm_dir = os.path.join(out_dir, 'lab_bin_norm')
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
## need this to find normalisation info:
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
min_max_normaliser = None
label_norm_file = 'label_norm_%s.dat' %(cfg.label_style)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.label_style == 'HTS':
sys.exit('script not tested with HTS labels')
# simple HTS labels
# logger.info('preparing label data (input) using standard HTS style labels')
# label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
# remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = ['*-#+*'])
# remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
# min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
# ###use only training data to find min-max information, then apply on the whole dataset
# min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
# min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.iteritems():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, in_dir, cfg.utt_ext, False)
elif label_style == 'hts':
logger.critical('script not tested with HTS labels')
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in xrange(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.iteritems():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.itervalues():
fd.close()
# no silence removal for synthesis ...
## minmax norm:
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
# reload stored minmax values: (TODO -- move reading and writing into MinMaxNormalisation class)
fid = open(label_norm_file, 'rb')
## This doesn't work -- precision is lost -- reads in as float64
#label_norm_info = numpy.fromfile(fid) ## label_norm_info = numpy.array(label_norm_info, 'float32')
## use struct to enforce float32:
nbytes = os.stat(label_norm_file)[6] # length in bytes
data = fid.read(nbytes) # = read until bytes run out
fid.close()
m = nbytes / 4 ## number 32 bit floats
format = str(m)+"f"
label_norm_info = struct.unpack(format, data)
label_norm_info = numpy.array(label_norm_info)
min_max_normaliser.min_vector = label_norm_info[:m/2]
min_max_normaliser.max_vector = label_norm_info[m/2:]
### apply precomputed min-max to the whole dataset
min_max_normaliser.normalise_data(binary_label_file_list, nn_label_norm_file_list)
### make output acoustic data
# if cfg.MAKECMP:
### retrieve acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
var_file_dict = {}
for feature_name in cfg.out_dimension_dict.keys():
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
# if cfg.NORMCMP:
combined_model_arch = str(len(hidden_layers_sizes))
for hid_size in hidden_layers_sizes:
combined_model_arch += '_' + str(hid_size)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.mdn.model' \
%(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number)
### DNN model training
# if cfg.TRAINDNN:
##if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(file_id_list, gen_dir, cfg.cmp_ext)
assert cfg.output_feature_normalisation == 'MVN'
#gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
target_mean_vector = cmp_min_max[0, ]
target_std_vector = cmp_min_max[1, ]
# dnn_generation(valid_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
# dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
dnn_generation(nn_label_norm_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, target_mean_vector, target_std_vector, cfg.out_dimension_dict, cfg.file_extension_dict, vocoder='glotthmm')
## Variance scaling:
test_var_scaling=False
scaled_dir = gen_dir + '_scaled'
if test_var_scaling:
file_id_list = simple_scale_variance_CONTINUUM(gen_dir, scaled_dir, var_file_dict, cfg.out_dimension_dict, file_id_list)
else:
simple_scale_variance(gen_dir, scaled_dir, var_file_dict, cfg.out_dimension_dict, file_id_list, gv_weight=0.5) ## gv_weight hard coded here!
### generate wav ---- glottHMM only!!!
#if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
generate_wav_glottHMM(scaled_dir, file_id_list) # generated speech
def simple_scale_variance(indir, outdir, var_file_dict, out_dimension_dict, file_id_list, gv_weight=1.0):
## simple variance scaling (silen et al. 2012, paragraph 3.1)
## TODO: Lots of things like stream names hardcoded here; 3 for delta + delta-delta; ...
all_streams = ['HNR','F0','LSF','Gain','LSFsource']
streams_to_scale = ['LSF']
static_variances = {}
static_dimension_dict = {}
for (feature_name,size) in out_dimension_dict.items():
static_dimension_dict[feature_name] = size/3
io_funcs = BinaryIOCollection()
for feature_name in var_file_dict.keys():
var_values, dimension = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1)
static_var_values = var_values[:static_dimension_dict[feature_name], :]
static_variances[feature_name] = static_var_values
if not os.path.isdir(outdir):
os.makedirs(outdir)
assert gv_weight <= 1.0 and gv_weight >= 0.0
local_weight = 1.0 - gv_weight
for uttname in file_id_list:
for stream in all_streams:
infile = os.path.join(indir, uttname + '.' + stream)
outfile = os.path.join(outdir, uttname + '.' + stream)
if not os.path.isfile(infile):
sys.exit(infile + ' does not exist')
if stream in streams_to_scale:
speech, dimension = io_funcs.load_binary_file_frame(infile, static_dimension_dict[stream])
utt_mean = numpy.mean(speech, axis=0)
utt_std = numpy.std(speech, axis=0)
global_std = numpy.transpose((static_variances[stream]))
weighted_global_std = (gv_weight * global_std) + (local_weight * utt_std)
std_ratio = weighted_global_std / utt_std
nframes, ndim = numpy.shape(speech)
utt_mean_matrix = numpy.tile(utt_mean, (nframes,1))
std_ratio_matrix = numpy.tile(std_ratio, (nframes,1))
scaled_speech = ((speech - utt_mean_matrix) * std_ratio_matrix) + utt_mean_matrix
io_funcs.array_to_binary_file(scaled_speech, outfile)
else:
os.system('cp %s %s'%(infile, outfile))
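# Illustrative (not part of the original script) summary of the scaling applied
# above to each stream in streams_to_scale, with w = gv_weight:
#   mixed_std = w * global_std + (1 - w) * utt_std
#   scaled    = (frames - utt_mean) * (mixed_std / utt_std) + utt_mean
# so frames are rescaled about the utterance mean towards the global standard
# deviation; all other streams are copied through unchanged.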
def simple_scale_variance_CONTINUUM(indir, outdir, var_file_dict, out_dimension_dict, file_id_list):
## Try range of interpolation weights for combining global & local variance
all_streams = ['cmp','HNR','F0','LSF','Gain','LSFsource']
streams_to_scale = ['LSF']
static_variances = {}
static_dimension_dict = {}
for (feature_name,size) in out_dimension_dict.items():
static_dimension_dict[feature_name] = size/3
io_funcs = BinaryIOCollection()
for feature_name in var_file_dict.keys():
var_values, dimension = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1)
static_var_values = var_values[:static_dimension_dict[feature_name], :]
static_variances[feature_name] = static_var_values
if not os.path.isdir(outdir):
os.makedirs(outdir)
file_id_list_out = []
for uttname in file_id_list:
for gv_weight in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
local_weight = 1.0 - gv_weight
for stream in all_streams:
infile = os.path.join(indir, uttname + '.' + stream)
extended_uttname = uttname + '_gv' + str(gv_weight)
print extended_uttname
outfile = os.path.join(outdir, extended_uttname + '.' + stream)
if not os.path.isfile(infile):
sys.exit(infile + ' does not exist')
if stream in streams_to_scale:
speech, dimension = io_funcs.load_binary_file_frame(infile, static_dimension_dict[stream])
utt_mean = numpy.mean(speech, axis=0)
utt_std = numpy.std(speech, axis=0)
global_std = numpy.transpose((static_variances[stream]))
weighted_global_std = (gv_weight * global_std) + (local_weight * utt_std)
std_ratio = weighted_global_std / utt_std
nframes, ndim = numpy.shape(speech)
utt_mean_matrix = numpy.tile(utt_mean, (nframes,1))
std_ratio_matrix = numpy.tile(std_ratio, (nframes,1))
scaled_speech = ((speech - utt_mean_matrix) * std_ratio_matrix) + utt_mean_matrix
io_funcs.array_to_binary_file(scaled_speech, outfile)
else:
os.system('cp %s %s'%(infile, outfile))
file_id_list_out.append(extended_uttname)
return file_id_list_out
def log_to_hertz(infile, outfile):
f = open(infile, 'r')
log_values = [float(val) for val in f.readlines()]
f.close()
def m2h(l):
h = math.exp(l)
return h
hertz = [m2h(l) for l in log_values]
f = open(outfile, 'w')
for val in hertz:
if val > 0:
f.write(str(val) + '\n')
else:
f.write('0.0\n')
f.close()
def generate_wav_glottHMM(gen_dir, gen_file_id_list):
x2x='~/repos/simple4all/CSTRVoiceClone/trunk/bin/x2x'
synthesis='~/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/tools/GlottHMM/Synthesis'
general_glott_conf = '~/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/voices/en/ky_02_toy/english_gold_basic_glott_KY/processors/speech_feature_extractor/main_config.cfg'
user_glott_conf = '~/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/voices/en/ky_02_toy/english_gold_basic_glott_KY/processors/speech_feature_extractor/user_config.cfg'
exports = 'export LIBCONFIG_INSTALL_DIR=/afs/inf.ed.ac.uk/user/o/owatts/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/tools/GlottHMM//libconfig-1.4.9 ; export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIBCONFIG_INSTALL_DIR/lib/.libs ; export LIBRARY_PATH=$LIBRARY_PATH:$LIBCONFIG_INSTALL_DIR/lib/.libs ; export CPATH=$CPATH:$LIBCONFIG_INSTALL_DIR/lib ;'
streams = ['HNR','F0','LSF','Gain','LSFsource']
for uttname in gen_file_id_list:
all_present = True
for stream in streams:
if not os.path.isfile(os.path.join(gen_dir, uttname + '.' + stream)):
all_present = False
if all_present:
for stream in streams:
extra = ''
if stream == 'F0':
extra = '.NEGVALS'
fname = os.path.join(gen_dir, uttname + '.' + stream)
fname_txt = os.path.join(gen_dir, uttname + '.txt.' + stream + extra)
comm = '%s +fa %s > %s'%(x2x, fname, fname_txt)
os.system(comm)
log_to_hertz(os.path.join(gen_dir, uttname + '.txt.F0.NEGVALS'), \
os.path.join(gen_dir, uttname + '.txt.F0'))
stem_name = os.path.join(gen_dir, uttname + '.txt')
comm = '%s %s %s %s %s'%(exports, synthesis, stem_name, general_glott_conf, user_glott_conf)
print comm
os.system(comm)
else:
print 'missing stream(s) for utterance ' + uttname
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
# set up logging to use our custom class
logging.setLoggerClass(LoggerPlotter)
# get a logger for this main function
logger = logging.getLogger("main")
if len(sys.argv) != 4:
logger.critical('usage: run_dnn.sh config_file_name in_dir out_dir')
sys.exit(1)
config_file = sys.argv[1]
in_dir = sys.argv[2]
out_dir = sys.argv[3]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
# if cfg.profile:
# logger.info('profiling is activated')
# import cProfile, pstats
# cProfile.run('main_function(cfg)', 'mainstats')
# # create a stream for the profiler to write to
# profiling_output = StringIO.StringIO()
# p = pstats.Stats('mainstats', stream=profiling_output)
# # print stats to that stream
# # here we just report the top 10 functions, sorted by total amount of time spent in each
# p.strip_dirs().sort_stats('tottime').print_stats(10)
# # print the result to the log
# logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
# profiling_output.close()
# logger.info('---End of profiling result---')
#
# else:
main_function(cfg, in_dir, out_dir)
sys.exit(0)
|
|
# -*- coding: utf-8 -*-
"""
requests_toolbelt.multipart
===========================
This holds all of the implementation details of the MultipartEncoder
"""
from requests.utils import super_len
from requests.packages.urllib3.filepost import iter_field_objects
from uuid import uuid4
import io
def encode_with(string, encoding):
"""Encoding ``string`` with ``encoding`` if necessary.
:param str string: If string is a bytes object, it will not encode it.
Otherwise, this function will encode it with the provided encoding.
:param str encoding: The encoding with which to encode string.
:returns: encoded bytes object
"""
if string and not isinstance(string, bytes):
return string.encode(encoding)
return string
class MultipartEncoder(object):
"""
The ``MultipartEncoder`` object is a generic interface to the engine that
will create a ``multipart/form-data`` body for you.
The basic usage is::
import requests
from requests_toolbelt import MultipartEncoder
encoder = MultipartEncoder({'field': 'value',
'other_field': 'other_value'})
r = requests.post('https://httpbin.org/post', data=encoder,
headers={'Content-Type': encoder.content_type})
If you do not need to take advantage of streaming the post body, you can
also do::
r = requests.post('https://httpbin.org/post',
data=encoder.to_string(),
headers={'Content-Type': encoder.content_type})
"""
def __init__(self, fields, boundary=None, encoding='utf-8'):
#: Boundary value either passed in by the user or created
self.boundary_value = boundary or uuid4().hex
self.boundary = '--{0}'.format(self.boundary_value)
#: Default encoding
self.encoding = encoding
#: Fields passed in by the user
self.fields = fields
#: State of streaming
self.finished = False
# Most recently used data
self._current_data = None
# Length of the body
self._len = None
# Our buffer
self._buffer = CustomBytesIO(encoding=encoding)
# This is a list of two-tuples containing the rendered headers and the
# data.
self._fields_list = []
# Iterator over the fields so we don't lose track of where we are
self._fields_iter = None
# Pre-render the headers so we can calculate the length
self._render_headers()
def __len__(self):
if self._len is None:
self._calculate_length()
return self._len
def _calculate_length(self):
boundary_len = len(self.boundary) # Length of --{boundary}
self._len = 0
for (header, data) in self._fields_list:
# boundary length + header length + body length + len('\r\n') * 2
self._len += boundary_len + len(header) + super_len(data) + 4
# Length of trailing boundary '--{boundary}--\r\n'
self._len += boundary_len + 4
@property
def content_type(self):
return str('multipart/form-data; boundary={0}'.format(
self.boundary_value
))
def to_string(self):
return self.read()
def read(self, size=None):
"""Read data from the streaming encoder.
:param int size: (optional), If provided, ``read`` will return exactly
that many bytes. If it is not provided, it will return the
remaining bytes.
:returns: bytes
"""
if size is not None:
size = int(size) # Ensure it is always an integer
bytes_length = len(self._buffer) # Calculate this once
size -= bytes_length if size > bytes_length else 0
self._load_bytes(size)
return self._buffer.read(size)
def _load_bytes(self, size):
written = 0
orig_position = self._buffer.tell()
# Consume previously unconsumed data
written += self._consume_current_data(size)
while size is None or written < size:
next_tuple = self._next_tuple()
if not next_tuple:
self.finished = True
break
headers, data = next_tuple
# We have a tuple, write the headers in their entirety.
# They aren't that large, if we write more than was requested, it
# should not hurt anyone much.
written += self._buffer.write(encode_with(headers, self.encoding))
self._current_data = coerce_data(data, self.encoding)
if size is not None and written < size:
size -= written
written += self._consume_current_data(size)
self._buffer.seek(orig_position, 0)
self._buffer.smart_truncate()
def _consume_current_data(self, size):
written = 0
# File objects need an integer size
if size is None:
size = -1
if self._current_data is None:
written = self._buffer.write(
encode_with(self.boundary, self.encoding)
)
written += self._buffer.write(encode_with('\r\n', self.encoding))
elif (self._current_data is not None and
super_len(self._current_data) > 0):
written = self._buffer.write(self._current_data.read(size))
if super_len(self._current_data) == 0 and not self.finished:
written += self._buffer.write(
encode_with('\r\n{0}\r\n'.format(self.boundary),
self.encoding)
)
return written
def _next_tuple(self):
next_tuple = tuple()
try:
# Try to get another field tuple
next_tuple = next(self._fields_iter)
except StopIteration:
# We reached the end of the list, so write the closing
# boundary. The last file tuple wrote a boundary like:
# --{boundary}\r\n, so move back two characters, truncate and
# write the proper ending.
if not self.finished:
self._buffer.seek(-2, 1)
self._buffer.truncate()
self._buffer.write(encode_with('--\r\n', self.encoding))
return next_tuple
def _render_headers(self):
e = self.encoding
iter_fields = iter_field_objects(self.fields)
self._fields_list = [
(f.render_headers(), readable_data(f.data, e)) for f in iter_fields
]
self._fields_iter = iter(self._fields_list)
def readable_data(data, encoding):
if hasattr(data, 'read'):
return data
return CustomBytesIO(data, encoding)
def coerce_data(data, encoding):
if not isinstance(data, CustomBytesIO):
if hasattr(data, 'getvalue'):
return CustomBytesIO(data.getvalue(), encoding)
if hasattr(data, 'fileno'):
return FileWrapper(data)
return data
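# Illustrative (not part of the original module) behaviour of the two helpers:
#   readable_data(b'abc', 'utf-8')       -> CustomBytesIO wrapping b'abc'
#   readable_data(open('f', 'rb'), ...)  -> the file object itself (it has .read)
#   coerce_data(io.BytesIO(b'x'), ...)   -> CustomBytesIO built from getvalue()
#   coerce_data(open('f', 'rb'), ...)    -> FileWrapper around the file (it has fileno)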
class CustomBytesIO(io.BytesIO):
def __init__(self, buffer=None, encoding='utf-8'):
buffer = encode_with(buffer, encoding)
super(CustomBytesIO, self).__init__(buffer)
def _get_end(self):
current_pos = self.tell()
self.seek(0, 2)
length = self.tell()
self.seek(current_pos, 0)
return length
def __len__(self):
length = self._get_end()
return length - self.tell()
def smart_truncate(self):
to_be_read = len(self)
already_read = self._get_end() - to_be_read
if already_read >= to_be_read:
old_bytes = self.read()
self.seek(0, 0)
self.truncate()
self.write(old_bytes)
self.seek(0, 0) # We want to be at the beginning
class FileWrapper(object):
def __init__(self, file_object):
self.fd = file_object
def __len__(self):
return super_len(self.fd) - self.fd.tell()
def read(self, length=-1):
return self.fd.read(length)
|
|
"""GUI with pyqtgraph for digitizing images .
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pyqtgraph as pg
import skimage as si
from skimage import color, io, exposure
from multiprocessing import Pool
import glob
import time
import sys
# these are the suspicious imports
import pims
import h5py
pg.setConfigOptions(antialias=True)
def mrawloader(fname, dtype=np.uint16, h=1024, w=1024, flip=True):
"""Load in an mraw as a memmaped numpy array.
Parameters
----------
fname : str
Location of the binary file.
dtype : np.dtype, default=np.uint16
Type of stored binary data.
h : int, default=1024
Height of the images.
w : int, default=1024
Width of the images.
flip : bool, default=True
Whether to flip the image or not.
Returns
-------
images : np.ndarray (numpy.core.memmap.memmap)
Numpy memmaped array
Notes
-----
This assumes that the images are not color. The data about the image
dimensions and number of frames can be extracted from the .cih file
(see pyadisi.metadata.photron).
Examples
--------
>>> images = pyadisi.gui.mrawloader('~/Desktop/2014-05-29_000001.mraw')
"""
images = np.memmap(fname, dtype, 'c').reshape(-1, h, w).swapaxes(1, 2)
return images
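def mraw_framecount(fname, dtype=np.uint16, h=1024, w=1024):
    """Hypothetical helper (a sketch, not part of the original API): infer the
    frame count that ``mrawloader`` will produce via ``reshape(-1, h, w)`` by
    dividing the raw file size by the bytes per frame. The result can be
    cross-checked against the frame count recorded in the .cih metadata file.
    """
    import os
    bytes_per_frame = h * w * np.dtype(dtype).itemsize
    return os.path.getsize(fname) // bytes_per_frame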
def mraw2tiff(savefolder, vid, interpolation='nearest', start=None, end=None, skip=None):
"""Write and mraw file out as a .tiff.
Parameters
----------
savefolder : path
Where to write tiff files out to
vid : np.ndarray or np.memmap
Video file, of shape (t, x, y)
start : int, default=None
First index to write from
end : int, default=None
Last index to write from
    skip : int, default=None
        Step between written frames (None is treated as 1, i.e. every frame).
Notes
-----
This requires tifffile.py which can be downloaded from
http://www.lfd.uci.edu/~gohlke/code/tifffile.py
Examples
--------
>>> mraw2tiff('/home/isaac/Desktop/mrawex/', cam1.swapaxes(1, 2), start=60, end=70, skip=2)
"""
    try:
        from tifffile import imsave
    except ImportError:
        raise ImportError(
            'tifffile.py not installed. Download it from '
            'http://www.lfd.uci.edu/~gohlke/code/tifffile.py')
#from skimage.io import imsave
#from scipy.misc import imsave
#from matplotlib.pyplot import imsave
if start is None:
start = 0
if end is None:
end = vid.shape[0]
if skip is None:
skip = 1
# save as grayscale if only three dimensions
if vid.ndim == 3:
cmap = plt.cm.gray
else:
cmap = None # color
if savefolder.endswith('/'):
savefolder = savefolder[:-1]
savebase = savefolder + '/{0:05d}.tiff'
for i in range(start, end, skip):
img = vid[i]
imsave(savebase.format(i), img)
#imsave(savebase.format(i), img, plugin='tiffile')
#imsave(savebase.format(i), img)
#plt.imsave(savebase.format(i), img, cmap=cmap, format='tiff')
def pimsloader(image_paths, flip=False, process_func=None):
"""Load in a stack of images using pims and ducktype it
so that we have the required methods.
Parameters
----------
image_paths : str
Path to the images to load.
flip : bool, default=False
Whether to flip the image or not.
Returns
-------
images : pims.image_sequence.ImageSequence
Images, but with required methods for the gui.
Example
-------
>>> images = pyadisi.gui.pimsload('~/Desktop/2014-05-29_000001/*/*.tif')
"""
def flipper(img):
return img[::-1, :] # .swapaxes(0, 1)
    # only build a default process_func when none was supplied
    if process_func is None and flip:
        process_func = flipper
images = pims.ImageSequence(image_paths, process_func=process_func)
# duck type on it (I feel this is a major abuse)
nframes = len(images)
dtype = images[0].dtype
ndim = images[0].ndim + 1
shape = [nframes]
for sh in images[0].shape:
shape.append(sh)
size = nframes * images[0].size
# ducktype it!
images.dtype = dtype
images.max = np.iinfo(dtype).max
    images.min = np.iinfo(dtype).min
images.ndim = ndim
images.shape = shape
images.size = size
return images
def hdf5loader(fname, data_path):
"""Duck type on hdf5 to get the desired attributes...I cry.
Parameters
----------
fname : str
The hdf5 file to load.
data_path : str
The local path inside the hdf5 file to the data.
Returns
-------
images : h5py._hl.dataset.Dataset
Images with the required methods for the gui.
fp : h5py._hl.files.File
Open hdf5 file (so fp.close() can be used to gracefully close it).
Notes
-----
This assumes the data is (time, y, x, c).
Example
-------
>>> images = pyadisi.gui.pimsload('~/Desktop/2014-05-29_000001.hdf5', 'raw')
"""
    fp = h5py.File(fname, 'r')
dat = fp[data_path]
dat.ndim = len(dat.shape)
dat.min = np.iinfo(dat.dtype).min
dat.max = np.iinfo(dat.dtype).max
return dat, fp
def imageviewer(images, crosshair=True, xvals=None):
"""View a stack of images (basically, a 4D numpy array).
Parameters
----------
images : image stack
Bastardized pims, hdf5, or ideally a memmaped binary file.
crosshair : bool, default=True
Whether to show the crosshair on the images.
xvals : np.ndarray, default=None
The time or frame count axis (can be negative for end triggers).
Returns
-------
imv : pyadisi.pyqtgraph.imageview.ImageView.ImageView
A neat viewer to investigate your image stack.
data : dict
The digitized locations, where keys are the frame number
and the (x, y) values are in a list.
proxy_chair : pyadisi.pyqtgraph.SignalProxy
Signal proxy for the crosshairs.
proxy_click : pyadisi.pyqtgraph.SignalProxy
Signal proxy for the mouse click events (how we digitize).
"""
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
pg.setConfigOptions(antialias=True)
## Always start by initializing Qt (only once per application)
#app = QtGui.QApplication([])
## Define a top-level widget to hold everything
win = QtGui.QWidget()
#win = QtGui.QMainWindow()
#win.resize(800, 800)
#layout = QtGui.QGridLayout()
#win.setLayout(layout)
# ImageView object to show video
imv = pg.ImageView(parent=win)
# add ImageView object to window
#win.setCentralWidget(imv)
#layout.addWidget(imv, 0, 0)
# display widget in new window
win.show()
# set the title of the window...
#imv.setWindowTitle('pyadisi is cool')
    # we are not ready to use these yet, so we hide them :)
imv.ui.roiBtn.hide()
imv.ui.normBtn.hide()
# fix quickMinMax...maybe
# We 'fix' this function so that we don't
# need a numpy array...This is only called if
    # the images passed are not a numpy ndarray.
if not isinstance(images, np.ndarray):
imv.quickMinMax = lambda x: (np.iinfo(x.dtype).min, np.iinfo(x.dtype).max)
# parameters to setImage (if we have a video, single color, single gray scale)
if images.ndim == 3 or images.ndim == 4:
if xvals is None:
xvals = np.arange(images.shape[0])
axes = {'t':0, 'x':1, 'y':2, 'c':3}
#elif images.ndim == 2:
# xvals = np.arange(images.shape[0])
# axes = {'x': 0, 'y': 1, 'c': 2}
elif images.ndim == 2:
xvals = None
axes = {'x': 0, 'y': 1}
# after it is doctored up, give it some images
imv.setImage(images, xvals=xvals, axes=axes, autoHistogramRange=True)
# we want to show the frame number on the image (eventually...)
vb = imv.getView()
label = pg.LabelItem(justify='right')
#vb.addItem(label)
label.setText("<span style='font-size: 26pt'>frame = {0}".format(imv.currentIndex))
# finally show (not sure when we have to do this)
#imv.show()
print('here....')
# we store the data in a dictionary
data = {}
def fill_data(current_index, point):
"""Add values to the dictionary storing marked points.
"""
key = '{0:05d}'.format(current_index)
        if key in data:
data[key].append(point)
else:
data[key] = [point]
# how we register mouse clicks
def mouseClicked(evt):
mouseclick = evt[0]
pos = mouseclick.scenePos().toQPoint() # https://github.com/pyqtgraph/pyqtgraph/blob/develop/pyqtgraph/Point.py#L154
in_scene = imv.getImageItem().sceneBoundingRect().contains(pos)
if in_scene and mouseclick.button() == 1:
# .contains() requires a QtCore.QPointF, but we get a Point (subclassed from QtCore.QPointF) from the event
mousePoint = vb.mapSceneToView(pos)
#print('frame {2:4d} : (x, y): ({0:.5f}, {1:.5f})'.format(mousePoint.x(), mousePoint.y(), imv.currentIndex))
#sys.stdout.flush()
# push the data into the dictionary
fill_data(imv.currentIndex, (mousePoint.x(), mousePoint.y()))
proxy_click = pg.SignalProxy(imv.scene.sigMouseClicked, rateLimit=60, slot=mouseClicked)
if crosshair:
# cross hair as intersection of two infinite lines
vLine = pg.InfiniteLine(angle=90, movable=False)
hLine = pg.InfiniteLine(angle=0, movable=False)
imv.addItem(vLine, ignoreBounds=True)
imv.addItem(hLine, ignoreBounds=True)
def mouseMoved(evt):
pos = evt[0] ## using signal proxy turns original arguments into a tuple
if imv.getImageItem().sceneBoundingRect().contains(pos):
mousePoint = vb.mapSceneToView(pos)
vLine.setPos(mousePoint.x())
hLine.setPos(mousePoint.y())
# add the mouse callbacks for the crosshair
proxy_chair = pg.SignalProxy(imv.scene.sigMouseMoved, rateLimit=60, slot=mouseMoved)
else:
proxy_chair = None
#win.show()
# finally, execute the application
#app.exec_()
return win, imv, data, proxy_chair, proxy_click
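if __name__ == '__main__':
    # Minimal driving sketch (the file path and geometry are placeholders,
    # not values from a real dataset). A Qt application must exist before
    # imageviewer() is called; exec_() blocks until the window is closed,
    # after which the digitized points collected in `data` can be saved.
    from pyqtgraph.Qt import QtGui
    app = QtGui.QApplication([])
    images = mrawloader('2014-05-29_000001.mraw', h=1024, w=1024)
    win, imv, data, proxy_chair, proxy_click = imageviewer(images)
    app.exec_()
    np.save('digitized_points.npy', data)  # the dict is pickled into the .npy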
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 17:45:51 2020
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
from statsmodels.regression.linear_model import OLS
import statsmodels.stats.power as smpwr
import statsmodels.stats.oneway as smo # needed for function with `test`
from statsmodels.stats.oneway import (
confint_effectsize_oneway, confint_noncentrality, effectsize_oneway,
anova_oneway,
anova_generic, equivalence_oneway, equivalence_oneway_generic,
power_equivalence_oneway, _power_equivalence_oneway_emp,
f2_to_wellek, fstat_to_wellek, wellek_to_f2)
from statsmodels.stats.robust_compare import scale_transform
from statsmodels.stats.contrast import (
wald_test_noncent_generic, wald_test_noncent, _offset_constraint)
def test_oneway_effectsize():
    # example 3 in Steiger 2004 Beyond the F-test, p. 169
F = 5
df1 = 3
df2 = 76
nobs = 80
ci = confint_noncentrality(F, (df1, df2), alpha=0.05,
alternative="two-sided")
ci_es = confint_effectsize_oneway(F, (df1, df2), alpha=0.05)
ci_steiger = ci_es.ci_f * np.sqrt(4 / 3)
res_ci_steiger = [0.1764, 0.7367]
res_ci_nc = np.asarray([1.8666, 32.563])
assert_allclose(ci, res_ci_nc, atol=0.0001)
assert_allclose(ci_es.ci_f_corrected, res_ci_steiger, atol=0.00006)
assert_allclose(ci_steiger, res_ci_steiger, atol=0.00006)
assert_allclose(ci_es.ci_f**2, res_ci_nc / nobs, atol=0.00006)
assert_allclose(ci_es.ci_nc, res_ci_nc, atol=0.0001)
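def test_oneway_effectsize_scaling_sketch():
    # Illustrative sanity check (a sketch of the scaling used above, not a
    # reference test): the Steiger interval follows from f = sqrt(nc / nobs)
    # rescaled by sqrt(k / (k - 1)), which for k = df1 + 1 = 4 groups is the
    # np.sqrt(4 / 3) factor applied in test_oneway_effectsize.
    res_ci_nc = np.asarray([1.8666, 32.563])
    nobs = 80
    k = 4
    ci_f = np.sqrt(res_ci_nc / nobs)
    ci_f_corrected = ci_f * np.sqrt(k / (k - 1))
    assert_allclose(ci_f_corrected, [0.1764, 0.7367], atol=0.0001)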
def test_effectsize_power():
# example and results from PASS documentation
n_groups = 3
means = [527.86, 660.43, 649.14]
vars_ = 107.4304**2
nobs = 12
es = effectsize_oneway(means, vars_, nobs, use_var="equal", ddof_between=0)
es = np.sqrt(es)
alpha = 0.05
power = 0.8
nobs_t = nobs * n_groups
kwds = {'effect_size': es, 'nobs': nobs_t, 'alpha': alpha, 'power': power,
'k_groups': n_groups}
from statsmodels.stats.power import FTestAnovaPower
res_pow = 0.8251
res_es = 0.559
kwds_ = kwds.copy()
del kwds_['power']
p = FTestAnovaPower().power(**kwds_)
assert_allclose(p, res_pow, atol=0.0001)
assert_allclose(es, res_es, atol=0.0006)
# example unequal sample sizes
nobs = np.array([15, 9, 9])
kwds['nobs'] = nobs
es = effectsize_oneway(means, vars_, nobs, use_var="equal", ddof_between=0)
es = np.sqrt(es)
kwds['effect_size'] = es
p = FTestAnovaPower().power(**kwds_)
res_pow = 0.8297
res_es = 0.590
assert_allclose(p, res_pow, atol=0.005) # lower than print precision
assert_allclose(es, res_es, atol=0.0006)
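def test_effectsize_power_cohens_f_sketch():
    # Illustrative sanity check (a sketch, not a reference test): with equal
    # group sizes and ddof_between=0, Cohen's f reduces to the root mean
    # squared deviation of the group means divided by the common standard
    # deviation, which reproduces the PASS value of 0.559 used above.
    means = np.asarray([527.86, 660.43, 649.14])
    sd = 107.4304
    f = np.sqrt(np.mean((means - means.mean())**2)) / sd
    assert_allclose(f, 0.559, atol=0.0006)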
def test_effectsize_fstat():
# results from R package `effectsize`, confint is 0.9 confidence
# > es = F_to_eta2(45.8, 3, 35)
Eta_Sq_partial = 0.796983758700696
    CI_eta2 = 0.685670133284926, 0.855981325777856  # reformatted from output
# > es = F_to_epsilon2(45.8, 3, 35)
Epsilon_Sq_partial = 0.779582366589327
CI_eps2 = 0.658727573280777, 0.843636867987386
# > es = F_to_omega2(45.8, 3, 35)
Omega_Sq_partial = 0.775086505190311
CI_omega2 = 0.65286429480169, 0.840179680453464
# > es = F_to_f(45.8, 3, 35)
Cohens_f_partial = 1.98134153686695
CI_f = 1.47694659580859, 2.43793847155554
f_stat, df1, df2 = 45.8, 3, 35
# nobs = df1 + df2 + 1 # not directly used in the following, only df
fes = smo._fstat2effectsize(f_stat, (df1, df2))
assert_allclose(np.sqrt(fes.f2), Cohens_f_partial, rtol=1e-13)
assert_allclose(fes.eta2, Eta_Sq_partial, rtol=1e-13)
assert_allclose(fes.eps2, Epsilon_Sq_partial, rtol=1e-13)
assert_allclose(fes.omega2, Omega_Sq_partial, rtol=1e-13)
ci_nc = confint_noncentrality(f_stat, (df1, df2), alpha=0.1)
# the following replicates R package effectsize
ci_es = smo._fstat2effectsize(ci_nc / df1, (df1, df2))
assert_allclose(ci_es.eta2, CI_eta2, rtol=2e-4)
assert_allclose(ci_es.eps2, CI_eps2, rtol=2e-4)
assert_allclose(ci_es.omega2, CI_omega2, rtol=2e-4)
assert_allclose(np.sqrt(ci_es.f2), CI_f, rtol=2e-4)
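def test_effectsize_fstat_identities_sketch():
    # Illustrative sanity check (a sketch of the standard conversion
    # identities, not a reference test): evaluated at F=45.8, df1=3, df2=35
    # these reproduce the R `effectsize` numbers quoted above.
    #   eta2   = F*df1 / (F*df1 + df2)
    #   eps2   = (F - 1)*df1 / (F*df1 + df2)
    #   omega2 = (F - 1)*df1 / (F*df1 + df2 + 1)
    #   f2     = eta2 / (1 - eta2)
    F, df1, df2 = 45.8, 3, 35
    eta2 = F * df1 / (F * df1 + df2)
    eps2 = (F - 1) * df1 / (F * df1 + df2)
    omega2 = (F - 1) * df1 / (F * df1 + df2 + 1)
    f2 = eta2 / (1 - eta2)
    assert_allclose(eta2, 0.796983758700696, rtol=1e-12)
    assert_allclose(eps2, 0.779582366589327, rtol=1e-12)
    assert_allclose(omega2, 0.775086505190311, rtol=1e-12)
    assert_allclose(np.sqrt(f2), 1.98134153686695, rtol=1e-12)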
def test_effectsize_fstat_stata():
# reference numbers computed with Stata 14
# Stata 16 does not seem to have confint for omega2
# esizei 2 40 7.47403193349075, level(90)
eta2 = 0.2720398648288652
lb_eta2 = 0.0742092468714613
ub_eta2 = 0.4156116886974804
omega2 = 0.2356418580703085
lb_omega2 = 0.0279197092150344
ub_omega2 = 0.3863922731323545
# level = 90
f_stat, df1, df2 = 7.47403193349075, 2, 40
fes = smo._fstat2effectsize(f_stat, (df1, df2))
assert_allclose(fes.eta2, eta2, rtol=1e-13)
assert_allclose(fes.omega2, omega2, rtol=0.02) # low agreement
ci_es = smo.confint_effectsize_oneway(f_stat, (df1, df2), alpha=0.1)
assert_allclose(ci_es.eta2, (lb_eta2, ub_eta2), rtol=1e-4)
assert_allclose(ci_es.ci_omega2, (lb_omega2, ub_omega2), rtol=0.025)
@pytest.mark.parametrize("center", ['median', 'mean', 'trimmed'])
def test_scale_transform(center):
x = np.random.randn(5, 3)
xt = scale_transform(x, center=center, transform='abs', trim_frac=0.2,
axis=0)
xtt = scale_transform(x.T, center=center, transform='abs', trim_frac=0.2,
axis=1)
assert_allclose(xt.T, xtt, rtol=1e-13)
xt0 = scale_transform(x[:, 0], center=center, transform='abs',
trim_frac=0.2)
assert_allclose(xt0, xt[:, 0], rtol=1e-13)
assert_allclose(xt0, xtt[0, :], rtol=1e-13)
class TestOnewayEquivalence(object):
@classmethod
def setup_class(cls):
y0 = [112.488, 103.738, 86.344, 101.708, 95.108, 105.931,
95.815, 91.864, 102.479, 102.644]
y1 = [100.421, 101.966, 99.636, 105.983, 88.377, 102.618,
105.486, 98.662, 94.137, 98.626, 89.367, 106.204]
y2 = [84.846, 100.488, 119.763, 103.736, 93.141, 108.254,
99.510, 89.005, 108.200, 82.209, 100.104, 103.706,
107.067]
y3 = [100.825, 100.255, 103.363, 93.230, 95.325, 100.288,
94.750, 107.129, 98.246, 96.365, 99.740, 106.049,
92.691, 93.111, 98.243]
n_groups = 4
arrs_w = [np.asarray(yi) for yi in [y0, y1, y2, y3]]
nobs = np.asarray([len(yi) for yi in arrs_w])
nobs_mean = np.mean(nobs)
means = np.asarray([yi.mean() for yi in arrs_w])
stds = np.asarray([yi.std(ddof=1) for yi in arrs_w])
cls.data = arrs_w # TODO use `data`
cls.means = means
cls.nobs = nobs
cls.stds = stds
cls.n_groups = n_groups
cls.nobs_mean = nobs_mean
def test_equivalence_equal(self):
# reference numbers from Jan and Shieh 2019, p. 5
means = self.means
nobs = self.nobs
stds = self.stds
n_groups = self.n_groups
eps = 0.5
res0 = anova_generic(means, stds**2, nobs, use_var="equal")
f = res0.statistic
res = equivalence_oneway_generic(f, n_groups, nobs.sum(), eps,
res0.df, alpha=0.05,
margin_type="wellek")
assert_allclose(res.pvalue, 0.0083, atol=0.001)
assert_equal(res.df, [3, 46])
# the agreement for f-stat looks too low
assert_allclose(f, 0.0926, atol=0.0006)
res = equivalence_oneway(self.data, eps, use_var="equal",
margin_type="wellek")
assert_allclose(res.pvalue, 0.0083, atol=0.001)
assert_equal(res.df, [3, 46])
def test_equivalence_welch(self):
# reference numbers from Jan and Shieh 2019, p. 6
means = self.means
nobs = self.nobs
stds = self.stds
n_groups = self.n_groups
vars_ = stds**2
eps = 0.5
res0 = anova_generic(means, vars_, nobs, use_var="unequal",
welch_correction=False)
f_stat = res0.statistic
res = equivalence_oneway_generic(f_stat, n_groups, nobs.sum(), eps,
res0.df, alpha=0.05,
margin_type="wellek")
assert_allclose(res.pvalue, 0.0110, atol=0.001)
assert_allclose(res.df, [3.0, 22.6536], atol=0.0006)
# agreement for Welch f-stat looks too low b/c welch_correction=False
assert_allclose(f_stat, 0.1102, atol=0.007)
res = equivalence_oneway(self.data, eps, use_var="unequal",
margin_type="wellek")
assert_allclose(res.pvalue, 0.0110, atol=1e-4)
assert_allclose(res.df, [3.0, 22.6536], atol=0.0006)
assert_allclose(res.f_stat, 0.1102, atol=1e-4) # 0.007)
# check post-hoc power, JS p. 6
pow_ = _power_equivalence_oneway_emp(f_stat, n_groups, nobs, eps,
res0.df)
assert_allclose(pow_, 0.1552, atol=0.007)
pow_ = power_equivalence_oneway(eps, eps, nobs.sum(),
n_groups=n_groups, df=None, alpha=0.05,
margin_type="wellek")
assert_allclose(pow_, 0.05, atol=1e-13)
nobs_t = nobs.sum()
es = effectsize_oneway(means, vars_, nobs, use_var="unequal")
es = np.sqrt(es)
es_w0 = f2_to_wellek(es**2, n_groups)
es_w = np.sqrt(fstat_to_wellek(f_stat, n_groups, nobs_t / n_groups))
pow_ = power_equivalence_oneway(es_w, eps, nobs_t,
n_groups=n_groups, df=None, alpha=0.05,
margin_type="wellek")
assert_allclose(pow_, 0.1552, atol=0.007)
assert_allclose(es_w0, es_w, atol=0.007)
margin = wellek_to_f2(eps, n_groups)
pow_ = power_equivalence_oneway(es**2, margin, nobs_t,
n_groups=n_groups, df=None, alpha=0.05,
margin_type="f2")
assert_allclose(pow_, 0.1552, atol=0.007)
class TestOnewayScale(object):
@classmethod
def setup_class(cls):
yt0 = np.array([102., 320., 0., 107., 198., 200., 4., 20., 110., 128.,
7., 119., 309.])
yt1 = np.array([0., 1., 228., 81., 87., 119., 79., 181., 43., 12., 90.,
105., 108., 119., 0., 9.])
yt2 = np.array([33., 294., 134., 216., 83., 105., 69., 20., 20., 63.,
98., 155., 78., 75.])
y0 = np.array([452., 874., 554., 447., 356., 754., 558., 574., 664.,
682., 547., 435., 245.])
y1 = np.array([546., 547., 774., 465., 459., 665., 467., 365., 589.,
534., 456., 651., 654., 665., 546., 537.])
y2 = np.array([785., 458., 886., 536., 669., 857., 821., 772., 732.,
689., 654., 597., 830., 827.])
n_groups = 3
data = [y0, y1, y2]
nobs = np.asarray([len(yi) for yi in data])
nobs_mean = np.mean(nobs)
means = np.asarray([yi.mean() for yi in data])
stds = np.asarray([yi.std(ddof=1) for yi in data])
cls.data = data
cls.data_transformed = [yt0, yt1, yt2]
cls.means = means
cls.nobs = nobs
cls.stds = stds
cls.n_groups = n_groups
cls.nobs_mean = nobs_mean
def test_means(self):
# library onewaystats, BF test for equality of means
# st = bf.test(y ~ g, df3)
statistic = 7.10900606421182
parameter = [2, 31.4207256105052]
p_value = 0.00283841965791224
# method = 'Brown-Forsythe Test'
res = anova_oneway(self.data, use_var="bf")
# R bf.test uses original BF df_num
assert_allclose(res.pvalue2, p_value, rtol=1e-13)
assert_allclose(res.statistic, statistic, rtol=1e-13)
assert_allclose([res.df_num2, res.df_denom], parameter)
def test_levene(self):
data = self.data
# lawstat: Test Statistic = 1.0866123063642, p-value = 0.3471072204516
statistic = 1.0866123063642
p_value = 0.3471072204516
res0 = smo.test_scale_oneway(data, method='equal', center='median',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res0.pvalue, p_value, rtol=1e-13)
assert_allclose(res0.statistic, statistic, rtol=1e-13)
# library car
# > lt = leveneTest(y ~ g, df3, center=mean, trim=0.2)
statistic = 1.10732113109744
p_value = 0.340359251994645
df = [2, 40]
res0 = smo.test_scale_oneway(data, method='equal', center='trimmed',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res0.pvalue, p_value, rtol=1e-13)
assert_allclose(res0.statistic, statistic, rtol=1e-13)
assert_allclose(res0.df, df)
# library(onewaytests)
# test uses mean as center
# > st = homog.test(y ~ g, df3)
statistic = 1.07894485177512
parameter = [2, 40] # df
p_value = 0.349641166869223
# method = "Levene's Homogeneity Test"
res0 = smo.test_scale_oneway(data, method='equal', center='mean',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res0.pvalue, p_value, rtol=1e-13)
assert_allclose(res0.statistic, statistic, rtol=1e-13)
assert_allclose(res0.df, parameter)
# > st = homog.test(y ~ g, df3, method = "Bartlett")
statistic = 3.01982414477323
# parameter = 2 # scipy bartlett does not return df
p_value = 0.220929402900495
# method = "Bartlett's Homogeneity Test"
# Bartlett is in scipy.stats
from scipy import stats
stat, pv = stats.bartlett(*data)
assert_allclose(pv, p_value, rtol=1e-13)
assert_allclose(stat, statistic, rtol=1e-13)
def test_options(self):
# regression tests for options,
# many might not be implemented in other packages
data = self.data
# regression numbers from initial run
statistic, p_value = 1.0173464626246675, 0.3763806150460239
df = (2.0, 24.40374758005409)
res = smo.test_scale_oneway(data, method='unequal', center='median',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res.pvalue, p_value, rtol=1e-13)
assert_allclose(res.statistic, statistic, rtol=1e-13)
assert_allclose(res.df, df)
statistic, p_value = 1.0329722145270606, 0.3622778213868562
df = (1.83153791573948, 30.6733640949525)
p_value2 = 0.3679999679787619
df2 = (2, 30.6733640949525)
res = smo.test_scale_oneway(data, method='bf', center='median',
transform='abs', trim_frac_mean=0.2)
assert_allclose(res.pvalue, p_value, rtol=1e-13)
assert_allclose(res.statistic, statistic, rtol=1e-13)
assert_allclose(res.df, df)
assert_allclose(res.pvalue2, p_value2, rtol=1e-13)
assert_allclose(res.df2, df2)
statistic, p_value = 1.7252431333701745, 0.19112038168209514
df = (2.0, 40.0)
res = smo.test_scale_oneway(data, method='equal', center='mean',
transform='square', trim_frac_mean=0.2)
assert_allclose(res.pvalue, p_value, rtol=1e-13)
assert_allclose(res.statistic, statistic, rtol=1e-13)
assert_equal(res.df, df)
statistic, p_value = 0.4129696057329463, 0.6644711582864451
df = (2.0, 40.0)
res = smo.test_scale_oneway(data, method='equal', center='mean',
transform=lambda x: np.log(x * x), # noqa
trim_frac_mean=0.2)
assert_allclose(res.pvalue, p_value, rtol=1e-13)
assert_allclose(res.statistic, statistic, rtol=1e-13)
assert_allclose(res.df, df)
# compare no transform with standard anova
res = smo.test_scale_oneway(data, method='unequal', center=0,
transform='identity', trim_frac_mean=0.2)
res2 = anova_oneway(self.data, use_var="unequal")
assert_allclose(res.pvalue, res2.pvalue, rtol=1e-13)
assert_allclose(res.statistic, res2.statistic, rtol=1e-13)
assert_allclose(res.df, res2.df)
def test_equivalence(self):
data = self.data
# compare no transform with standard anova
res = smo.equivalence_scale_oneway(data, 0.5, method='unequal',
center=0,
transform='identity')
res2 = equivalence_oneway(self.data, 0.5, use_var="unequal")
assert_allclose(res.pvalue, res2.pvalue, rtol=1e-13)
assert_allclose(res.statistic, res2.statistic, rtol=1e-13)
assert_allclose(res.df, res2.df)
res = smo.equivalence_scale_oneway(data, 0.5, method='bf',
center=0,
transform='identity')
res2 = equivalence_oneway(self.data, 0.5, use_var="bf")
assert_allclose(res.pvalue, res2.pvalue, rtol=1e-13)
assert_allclose(res.statistic, res2.statistic, rtol=1e-13)
assert_allclose(res.df, res2.df)
class TestOnewayOLS(object):
@classmethod
def setup_class(cls):
y0 = [112.488, 103.738, 86.344, 101.708, 95.108, 105.931,
95.815, 91.864, 102.479, 102.644]
y1 = [100.421, 101.966, 99.636, 105.983, 88.377, 102.618,
105.486, 98.662, 94.137, 98.626, 89.367, 106.204]
y2 = [84.846, 100.488, 119.763, 103.736, 93.141, 108.254,
99.510, 89.005, 108.200, 82.209, 100.104, 103.706,
107.067]
y3 = [100.825, 100.255, 103.363, 93.230, 95.325, 100.288,
94.750, 107.129, 98.246, 96.365, 99.740, 106.049,
92.691, 93.111, 98.243]
cls.k_groups = k = 4
cls.data = data = [y0, y1, y2, y3]
cls.nobs = nobs = np.asarray([len(yi) for yi in data])
groups = np.repeat(np.arange(k), nobs)
cls.ex = (groups[:, None] == np.arange(k)).astype(np.int64)
cls.y = np.concatenate(data)
def test_ols_noncentrality(self):
k = self.k_groups
res_ols = OLS(self.y, self.ex).fit()
nobs_t = res_ols.model.nobs
# constraint
c_equal = -np.eye(k)[1:]
c_equal[:, 0] = 1
v = np.zeros(c_equal.shape[0])
# noncentrality at estimated parameters
wt = res_ols.wald_test(c_equal)
df_num, df_denom = wt.df_num, wt.df_denom
cov_p = res_ols.cov_params()
nc_wt = wald_test_noncent_generic(res_ols.params, c_equal, v, cov_p,
diff=None, joint=True)
assert_allclose(nc_wt, wt.statistic * wt.df_num, rtol=1e-13)
nc_wt2 = wald_test_noncent(res_ols.params, c_equal, v, res_ols,
diff=None, joint=True)
assert_allclose(nc_wt2, nc_wt, rtol=1e-13)
es_ols = nc_wt / nobs_t
es_oneway = smo.effectsize_oneway(res_ols.params, res_ols.scale,
self.nobs, use_var="equal")
assert_allclose(es_ols, es_oneway, rtol=1e-13)
alpha = 0.05
pow_ols = smpwr.ftest_power(np.sqrt(es_ols), df_denom, df_num, alpha,
ncc=1)
pow_oneway = smpwr.ftest_anova_power(np.sqrt(es_oneway), nobs_t, alpha,
k_groups=k, df=None)
assert_allclose(pow_ols, pow_oneway, rtol=1e-13)
# noncentrality at other params
params_alt = res_ols.params * 0.75
# compute constraint value so we can get noncentrality from wald_test
v_off = _offset_constraint(c_equal, res_ols.params, params_alt)
wt_off = res_ols.wald_test((c_equal, v + v_off))
nc_wt_off = wald_test_noncent_generic(params_alt, c_equal, v,
cov_p, diff=None, joint=True)
assert_allclose(nc_wt_off, wt_off.statistic * wt_off.df_num,
rtol=1e-13)
# check vectorized version, joint=False
nc_wt_vec = wald_test_noncent_generic(params_alt, c_equal, v,
cov_p, diff=None, joint=False)
for i in range(c_equal.shape[0]):
nc_wt_i = wald_test_noncent_generic(params_alt, c_equal[i : i + 1], # noqa
v[i : i + 1], cov_p, diff=None, # noqa
joint=False)
assert_allclose(nc_wt_vec[i], nc_wt_i, rtol=1e-13)
def test_simulate_equivalence():
# regression test, needs large k_mc to be reliable
k_groups = 4
k_repl = 10
nobs = np.array([10, 12, 13, 15]) * k_repl
means = np.array([-1, 0, 0, 1]) * 0.12
vars_ = np.array([1, 2, 3, 4])
nobs_t = nobs.sum()
eps = 0.0191 * 10
opt_var = ["unequal", "equal", "bf"]
k_mc = 100
np.random.seed(987126)
res_mc = smo.simulate_power_equivalence_oneway(
means, nobs, eps, vars_=vars_, k_mc=k_mc, trim_frac=0.1,
options_var=opt_var, margin_type="wellek")
frac_reject = (res_mc.pvalue <= 0.05).sum(0) / k_mc
assert_allclose(frac_reject, [0.17, 0.18, 0.14], atol=0.001)
# result with k_mc = 10000 is [0.1466, 0.1871, 0.1606]
# similar to asy below, but not very close for all
es_alt_li = []
for uv in opt_var:
es = effectsize_oneway(means, vars_, nobs, use_var=uv)
es_alt_li.append(es)
# compute asy power as comparison
margin = wellek_to_f2(eps, k_groups)
pow_ = [power_equivalence_oneway(
es_, margin, nobs_t, n_groups=k_groups, df=None, alpha=0.05,
margin_type="f2") for es_ in es_alt_li]
# regression test numbers
assert_allclose(pow_, [0.147749, 0.173358, 0.177412], atol=0.007)
|
|
import unittest
from hub_dispatch import HubDispatch
class TestTopology(unittest.TestCase):
def test_empty_graph(self):
h = HubDispatch()
with self.assertRaises(Exception) as exc:
h.remove_hub('foo')
self.assertEqual(
            str(exc.exception),
"Hub 'foo' does not exist"
)
with self.assertRaises(Exception) as exc:
h.link('foo', 'bar')
self.assertEqual(
            str(exc.exception),
"Hub 'foo' does not exist"
)
with self.assertRaises(Exception) as exc:
h.unlink('foo', 'bar')
self.assertEqual(
            str(exc.exception),
"Hub 'foo' does not exist"
)
def test_add_hub_linked_to_node(self):
h = HubDispatch()
h.add_hub('foo')
self.assertEqual(h._changes.assignments, [('foo', 'foo')])
h.link('foo', 'bar')
self.assertEqual(
h._topology.nodes,
{'bar': 'foo', 'foo': 'foo'},
)
self.assertEqual(
h._topology.hubs,
{'foo': 2},
)
self.assertEqual(h._changes.assignments, [
('foo', 'foo'),
('foo', 'bar')
])
h._changes._clear()
def test_unlink_hub_with_lonely_node(self):
h = HubDispatch().add_hub('foo').link('foo', 'bar')
h._changes._clear()
with self.assertRaises(Exception) as exc:
h.unlink('foo', 'unknown-node')
self.assertEqual(
            str(exc.exception),
"Hub 'foo' is not connected to node 'unknown-node'"
)
h.unlink('foo', 'bar')
self.assertEqual(h._topology.nodes, {'foo': 'foo'})
self.assertEqual(h._topology.hubs, {'foo': 1})
self.assertEqual(h._changes.assignments, [])
self.assertEqual(h._changes.unassignments, [('foo', 'bar')])
def test_unlink_hub_with_lonely_nodes(self):
h = HubDispatch().add_hub('foo').link('foo', 'n1').link('foo', 'n2')
h._changes._clear()
h.unlink('foo', 'n1')
self.assertEqual(h._topology.nodes, {'n2': 'foo', 'foo': 'foo'})
self.assertEqual(h._topology.hubs, {'foo': 2})
self.assertEqual(h._changes.assignments, [])
self.assertEqual(h._changes.unassignments, [('foo', 'n1')])
def test_remove_hub_with_lonely_links(self):
h = HubDispatch().add_hub('foo').link('foo', 'bar')
h._changes._clear()
h.remove_hub('foo')
self.assertEqual(h._topology.nodes, {})
self.assertEqual(h._topology.hubs, {})
self.assertEqual(h._changes.assignments, [])
self.assertEqual(h._changes.unassignments, [
('foo', 'foo'),
('foo', 'bar')
])
def test_remove_hub_following_hub(self):
h = HubDispatch().add_hub('foo', 'bar').link('foo', 'bar')
self.assertEqual(h._topology.nodes, {'bar': 'bar', 'foo': 'foo'})
self.assertEqual(h._topology.hubs, {'foo': 1, 'bar': 1})
self.assertEqual(h._changes.assignments, [
('foo', 'foo'), ('bar', 'bar')
])
self.assertEqual(h._changes.unassignments, [])
h._changes._clear()
h.remove_hub('foo')
self.assertEqual(h._topology.nodes, {'bar': 'bar'})
self.assertEqual(h._topology.hubs, {'bar': 1})
self.assertEqual(h._changes.assignments, [])
self.assertEqual(h._changes.unassignments, [('foo', 'foo')])
def test_hub_follows_followee(self):
h = HubDispatch().add_hub('foo', 'bar')\
.link('foo', 'bar').link('bar', 'foo')
self.assertEqual(h._topology.nodes, {'foo': 'foo', 'bar': 'bar'})
self.assertEqual(h._topology.hubs, {'foo': 1, 'bar': 1})
self.assertEqual(h._changes.assignments, [
('foo', 'foo'),
('bar', 'bar'),
])
self.assertEqual(h._changes.unassignments, [])
def test_remove_hub_with_shared_link(self):
h = HubDispatch()\
.add_hub('h1', 'h2')\
.link('h1', 'node').link('h2', 'node')
self.assertEqual(h._topology.nodes,
{'node': 'h1', 'h1': 'h1', 'h2': 'h2'})
self.assertEqual(h._topology.hubs, {'h1': 2, 'h2': 1})
self.assertEqual(h._changes.assignments,
[('h1', 'h1'), ('h2', 'h2'), ('h1', 'node')])
self.assertEqual(h._changes.unassignments, [])
h._changes._clear()
# remove assigned hub
h.unlink('h1', 'node')
self.assertEqual(h._topology.nodes,
{'h1': 'h1', 'h2': 'h2', 'node': 'h2'})
self.assertEqual(h._topology.hubs, {'h2': 2, 'h1': 1})
self.assertEqual(h._changes.assignments, [('h2', 'node')])
self.assertEqual(h._changes.unassignments, [('h1', 'node')])
def test_remove_unassigned_hub(self):
h = HubDispatch()\
.add_hub('h1', 'h2')\
.link('h1', 'node').link('h2', 'node')
h._changes._clear()
h.unlink('h2', 'node')
self.assertEqual(h._topology.nodes, {
'node': 'h1',
'h1': 'h1',
'h2': 'h2',
})
self.assertEqual(h._topology.hubs, {'h1': 2, 'h2': 1})
def test_assign_to_least_loaded(self):
h = HubDispatch()\
.add_hub('h1', 'h2', 'h3', 'h4')\
.link('h1', 'node')\
.link('h4', 'node', 'foo', 'plop', 'pika')\
.link('h2', 'node', 'foo', 'bar')\
.link('h3', 'node')
h._changes._clear()
h.unlink('h1', 'node')
self.assertEqual(
h._topology.nodes,
{'h1': 'h1', 'h2': 'h2', 'h3': 'h3', 'h4': 'h4', 'foo': 'h4',
'bar': 'h2', 'node': 'h3', 'plop': 'h4', 'pika': 'h4'}
)
self.assertEqual(h._topology.hubs,
{'h2': 2, 'h3': 2, 'h4': 4, 'h1': 1})
self.assertEqual(h._changes.assignments, [('h3', 'node')])
self.assertEqual(h._changes.unassignments, [('h1', 'node')])
def test_reassign_node_on_hub_removal(self):
h = HubDispatch().add_hub('A', 'C').link('A', 'B').link('C', 'B')
self.assertEqual(h._topology.nodes, {
'A': 'A', 'B': 'A', 'C': 'C'
})
h._changes._clear()
h.remove_hub('A')
self.assertEqual(h._topology.nodes, {
'B': 'C', 'C': 'C'
})
self.assertEqual(h._changes.unassignments, [('A', 'A'), ('A', 'B')])
self.assertEqual(h._changes.assignments, [('C', 'B')])
def test_promote_hub_a_followed_node(self):
h = HubDispatch().add_hub('A').link('A', 'B')
h._changes._clear()
h.add_hub('B')
self.assertEqual(h._topology.nodes, {
'A': 'A', 'B': 'A'
})
self.assertEqual(h._topology.hubs, {'A': 2})
def test_reassignment_when_hub_have_multiple_assignees(self):
h = HubDispatch()\
.add_hub('h1', 'h2')\
.link('h1', 'n1', 'n2')\
.link('h2', 'n1')
h._changes._clear()
h.unlink('h1', 'n1')
self.assertEqual(h._topology.nodes, {
'h1': 'h1', 'h2': 'h2',
'n1': 'h2', 'n2': 'h1',
})
self.assertEqual(h._topology.hubs, {'h1': 2, 'h2': 2})
self.assertEqual(h._changes.assignments, [('h2', 'n1')])
self.assertEqual(h._changes.unassignments, [('h1', 'n1')])
def test_least_loaded_func(self):
h = HubDispatch()
h._topology.hubs['foo'] = 0
h._topology.hubs['bar'] = 42
self.assertEqual('foo', h._least_loaded('foo', 'bar'))
self.assertEqual('foo', h._least_loaded('bar', 'foo'))
if __name__ == '__main__':
unittest.main()
|
|
'''
Run the tests using testrunner.py script in the project root directory.
Usage: testrunner.py SDK_PATH TEST_PATH
Run unit tests for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules
Options:
-h, --help show this help message and exit
'''
import unittest
import webapp2
import os
import webtest
from google.appengine.ext import testbed
from mock import Mock
from mock import patch
import boilerplate
from boilerplate import models
from boilerplate import config as boilerplate_config
import config
import routes
from boilerplate import routes as boilerplate_routes
from boilerplate.lib import utils
from boilerplate.lib import captcha
from boilerplate.lib import i18n
from boilerplate.lib import test_helpers
from web.models import Translation, Content, Canon, BookStructure, ChapterStructure, BookTranslation, ChapterTranslation
# setting HTTP_HOST in extra_environ parameter for TestApp is not enough for taskqueue stub
os.environ['HTTP_HOST'] = 'localhost'
# globals
network = False
# mock Internet calls
if not network:
i18n.get_territory_from_ip = Mock(return_value=None)
class AppTest(unittest.TestCase, test_helpers.HandlerHelpers):
def setUp(self):
# create a WSGI application.
webapp2_config = boilerplate_config.config
webapp2_config.update(config.config)
self.app = webapp2.WSGIApplication(config=webapp2_config)
routes.add_routes(self.app)
boilerplate_routes.add_routes(self.app)
self.testapp = webtest.TestApp(self.app, extra_environ={'REMOTE_ADDR': '127.0.0.1'})
# use absolute path for templates
self.app.config['webapp2_extras.jinja2']['template_path'] = [
os.path.join(os.path.dirname(boilerplate.__file__), '../templates'),
os.path.join(os.path.dirname(boilerplate.__file__), 'templates')]
# activate GAE stubs
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_taskqueue_stub()
self.testbed.init_mail_stub()
self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)
self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
self.testbed.init_user_stub()
self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) Version/6.0 Safari/536.25',
'Accept-Language': 'en_US'}
# fix configuration if this is still a raw boilerplate code - required by test with mails
if not utils.is_email_valid(self.app.config.get('contact_sender')):
self.app.config['contact_sender'] = "noreply-testapp@example.com"
if not utils.is_email_valid(self.app.config.get('contact_recipient')):
self.app.config['contact_recipient'] = "support-testapp@example.com"
def tearDown(self):
self.testbed.deactivate()
def test_config_environment(self):
        self.assertEqual(self.app.config.get('environment'), 'testing')
class ModelTest(unittest.TestCase):
def setUp(self):
# activate GAE stubs
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def testCanon(self):
#
# test Canon.create
#
canon = Canon.create('TEST')
self.assertIsNotNone(canon)
# check if created and retrievable
canon = Canon.make_key('TEST').get()
self.assertIsNotNone(canon)
#
# test Canon.add_book
#
book1 = Canon.add_or_update_book('TEST', book_code='1tes', title='1 Testing', chapters=[ChapterStructure(num_of_verses=5), ChapterStructure(num_of_verses=3)])
self.assertIsNotNone(book1)
self.assertEqual(Canon.make_key('TEST').get().num_of_books, 1)
# book should have canon as parent, and book id as key
self.assertEqual(book1.key.parent(), canon.key)
self.assertEqual(book1.key.string_id(), '1tes')
book2 = Canon.add_or_update_book('TEST', book_code='2tes', title='2 Testing', chapters=[ChapterStructure(num_of_verses=5), ChapterStructure(num_of_verses=3), ChapterStructure(num_of_verses=4)])
self.assertIsNotNone(book2)
# book should have canon as parent, and book id as key
self.assertEqual(book2.key.parent(), canon.key)
self.assertEqual(book2.key.string_id(), '2tes')
self.assertEqual(Canon.make_key('TEST').get().num_of_books, 2)
self.assertEqual(BookStructure.query().count(1000), 2)
#
# test Canon.reset
#
Canon.reset('TEST')
self.assertEqual(Canon.make_key('TEST').get().num_of_books, 0)
self.assertEqual(BookStructure.query().count(1000), 0)
#
# test Canon.build
#
Canon.build('TEST', [
{'book_code':'gen', 'title':'Genesis', 'chapters':[2,3,4]},
{'book_code':'1tes', 'title':'1 Testing', 'chapters':[5,6]},
{'book_code':'2tes', 'title':'2 Testing', 'chapters':[7,8,9,10]},
])
self.assertEqual(Canon.make_key('TEST').get().num_of_books, 3)
self.assertEqual(BookStructure.query().count(1000), 3)
book1 = BookStructure.make_key('TEST', 'gen').get()
self.assertEqual(book1.num_of_chapters, 3)
self.assertEqual(book1.get_chapter(1).num_of_verses, 2)
self.assertEqual(book1.get_chapter(2).num_of_verses, 3)
self.assertEqual(book1.get_chapter(3).num_of_verses, 4)
book2 = BookStructure.make_key('TEST', '1tes').get()
self.assertEqual(book2.num_of_chapters, 2)
book3 = BookStructure.make_key('TEST', '2tes').get()
self.assertEqual(book3.num_of_chapters, 4)
def testTranslation(self):
#
# init
#
canon = Canon.create('TEST')
#
# Translation test
#
translation = Translation.create('TEST', 'en-TST', title='English Test')
self.assertIsNotNone(translation)
# check if created and retrievable
translation = Translation.make_key('TEST', 'en-TST').get()
self.assertIsNotNone(translation)
#
# Book Test
#
        # test that adding a non-existent book fails
book = Translation.add_or_update_book(translation.key, 'should-fail')
self.assertIsNone(book) # should fail, book structure doesn't exist
# test add book successfully
book1 = Canon.add_or_update_book('TEST', book_code='1tes', title='1 Testing', chapters=[ChapterStructure(num_of_verses=5), ChapterStructure(num_of_verses=3)])
book_translation = Translation.add_or_update_book(translation.key, '1tes')
self.assertIsNotNone(book_translation)
#
# Translation build test
#
# build canon first
Canon.build('TEST', [
{'book_code':'gen', 'title':'Genesis', 'chapters':[2,3,4]},
{'book_code':'1tes', 'title':'1 Testing', 'chapters':[5,6]},
{'book_code':'2tes', 'title':'2 Testing', 'chapters':[7,8,9,10]},
])
Translation.build(translation.key, translation_data={
'title' : 'translation title',
'copyright' : 'translation copyright',
'details' : {},
'books' : [
# array of books
{
'book_code': 'gen',
'title': 'Genesis',
'lookup': ['gen'],
'details': {},
1: {
1: { 'text': 'In the beginning.', },
2: { 'text': 'And the earth was without form.', },
}
}
]
})
chapter = ChapterTranslation.make_key(translation.key, 'gen', 1).get()
self.assertIsNotNone(chapter)
self.assertEqual(chapter.verses[1].text, 'In the beginning.')
self.assertEqual(chapter.verses[2].text, 'And the earth was without form.')
if __name__ == "__main__":
unittest.main()
|
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
import time
from netforce.access import get_active_company
from netforce import database
class StockCount(Model):
_name = "stock.count"
_string = "Stock Count"
_audit_log = True
_name_field = "number"
_multi_company = True
_fields = {
"number": fields.Char("Number", required=True, search=True),
"location_id": fields.Many2One("stock.location", "Warehouse", condition=[["type", "=", "internal"]], required=True, search=True),
"date": fields.Date("Date", required=True, search=True),
"description": fields.Char("Description"),
"state": fields.Selection([("draft", "Draft"), ("done", "Completed"), ("voided", "Voided")], "Status", required=True),
"lines": fields.One2Many("stock.count.line", "count_id", "Lines"),
"moves": fields.One2Many("stock.move", "related_id", "Stock Movements"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"company_id": fields.Many2One("company", "Company"),
"journal_id": fields.Many2One("stock.journal", "Journal"),
"total_cost_amount": fields.Decimal("Total New Cost Amount",function="get_total_cost_amount"),
}
_order="date desc"
def _get_journal(self, context={}):
settings=get_model("settings").browse(1)
if settings.stock_count_journal_id:
return settings.stock_count_journal_id.id
def _get_number(self, context={}):
while 1:
num = get_model("sequence").get_number("stock_count")
if not num:
return None
res = self.search([["number", "=", num]])
if not res:
return num
get_model("sequence").increment("stock_count")
_defaults = {
"state": "draft",
"date": lambda *a: time.strftime("%Y-%m-%d"),
"number": _get_number,
"company_id": lambda *a: get_active_company(),
'journal_id': _get_journal,
}
def delete_lines(self, ids, context={}):
obj = self.browse(ids)[0]
line_ids = [l.id for l in obj.lines]
if line_ids:
get_model("stock.count.line").delete(line_ids)
return {
"flash": "Stock count lines deleted",
}
def update_lines(self, ids, context={}):
obj=self.browse(ids[0])
qtys={}
amts={}
for bal in get_model("stock.balance").search_browse([["location_id", "=", obj.location_id.id]]):
k=(bal.product_id.id,bal.lot_id.id)
qtys[k]=bal.qty_phys
amts[k]=bal.amount
for line in obj.lines:
prod=line.product_id
k=(prod.id,line.lot_id.id)
qty=qtys.get(k,0)
amt=amts.get(k,0)
vals={
"bin_location": prod.bin_location,
"prev_qty": qty,
"prev_cost_amount": amt,
"uom_id": prod.uom_id.id,
}
line.write(vals)
return {
"flash": "Stock count lines updated",
}
def add_lines(self, ids, context={}): # FIXME: prev_qty
print("stock_count.add_lines")
obj = self.browse(ids)[0]
loc_id = obj.location_id.id
prod_lines={}
for line in obj.lines:
prod_lines[(line.product_id.id,line.lot_id.id)]=line.id
n=0
for bal in get_model("stock.balance").search_browse([["location_id", "=", loc_id]]):
if bal.qty_phys == 0 and bal.amount==0:
continue
prod=bal.product_id
lot=bal.lot_id
line_id=prod_lines.get((prod.id,lot.id))
if line_id:
continue
vals = {
"count_id": obj.id,
"product_id": prod.id,
"lot_id": bal.lot_id.id,
"bin_location": prod.bin_location,
"prev_qty": bal.qty_phys,
"prev_cost_amount": bal.amount,
"new_qty": 0,
"unit_price": 0,
"uom_id": prod.uom_id.id,
}
get_model("stock.count.line").create(vals)
n+=1
print("n=%d"%n)
return {
"flash": "%d stock count lines added"%n,
}
def remove_dup(self,ids,context={}):
obj = self.browse(ids[0])
prod_lines={}
dup_ids=[]
for line in obj.lines:
k=(line.product_id.id,line.lot_id.id)
if k in prod_lines:
dup_ids.append(line.id)
else:
prod_lines[k]=line.id
get_model("stock.count.line").delete(dup_ids)
return {
"flash": "%d duplicate lines removed"%len(dup_ids),
}
def onchange_product(self, context):
data = context["data"]
loc_id = data["location_id"]
path = context["path"]
line = get_data_path(data, path, parent=True)
prod_id = line.get("product_id")
if not prod_id:
return {}
prod = get_model("product").browse(prod_id)
lot_id = line.get("lot_id")
key=(prod.id,lot_id,loc_id,None)
ctx={"date_to":data["date"]}
bals=get_model("stock.balance").compute_key_balances([key],context=ctx)[key]
qty=bals[0]
amt=bals[1]
unit_price=amt/qty if qty else 0
line["bin_location"] = prod.bin_location
line["prev_qty"] = qty
line["prev_cost_amount"] = amt
line["prev_cost_price"] = unit_price
line["new_qty"] = qty
line["unit_price"] = unit_price
line["uom_id"] = prod.uom_id.id
return data
def update_prev_qtys(self,ids,context={}):
print("StockCount.update_prev_qtys")
t0=time.time()
obj=self.browse(ids[0])
keys=[]
for line in obj.lines:
key=(line.product_id.id,line.lot_id.id,obj.location_id.id,None)
keys.append(key)
ctx={"date_to":obj.date}
all_bals=get_model("stock.balance").compute_key_balances(keys,context=ctx)
for line in obj.lines:
key=(line.product_id.id,line.lot_id.id,obj.location_id.id,None)
bals=all_bals[key]
qty=bals[0]
amt=bals[1]
line.write({
"prev_qty": qty,
"prev_cost_amount": amt,
})
t1=time.time()
print("<< StockCount.update_prev_qtys finished in %.2f s"%(t1-t0))
def validate(self, ids, context={}):
print("StockCount.validate",ids)
self.update_prev_qtys(ids,context=context)
obj = self.browse(ids[0])
settings = get_model("settings").browse(1)
res = get_model("stock.location").search([["type", "=", "inventory"]])
if not res:
raise Exception("Inventory loss location not found")
prod_lines={}
for line in obj.lines:
k=(line.product_id.id,line.lot_id.id)
if k in prod_lines:
raise Exception("Duplicate product in stock count: %s"%line.product_id.code)
prod_lines[k]=line.id
invent_loc_id = res[0]
move_ids = []
prod_ids = []
line_no=0
num_lines=len(obj.lines)
db=database.get_connection()
t0=time.time()
for line in obj.lines:
line_no+=1
print("line %s/%s"%(line_no,num_lines))
prod=line.product_id
if prod.type!="stock":
raise Exception("Invalid product type in stock count: %s"%prod.code)
prod_ids.append(line.product_id.id)
if line.new_qty <= line.prev_qty:
qty_diff = line.prev_qty - line.new_qty
amount_diff = (line.prev_cost_amount or 0) - (line.new_cost_amount or 0)
price_diff = amount_diff / qty_diff if qty_diff else 0
loc_from_id = obj.location_id.id
loc_to_id = invent_loc_id
elif line.new_qty > line.prev_qty:
qty_diff = line.new_qty - line.prev_qty
amount_diff = line.new_cost_amount - (line.prev_cost_amount or 0)
price_diff = amount_diff / qty_diff if qty_diff else 0
loc_from_id = invent_loc_id
loc_to_id = obj.location_id.id
vals = {
"journal_id": obj.journal_id.id or settings.stock_count_journal_id.id,
"date": obj.date,
"ref": obj.number,
"product_id": line.product_id.id,
"lot_id": line.lot_id.id,
"location_from_id": loc_from_id,
"location_to_id": loc_to_id,
"qty": qty_diff,
"uom_id": line.uom_id.id,
"cost_price": price_diff,
"cost_amount": amount_diff,
"related_id": "stock.count,%d" % obj.id,
}
#move_id = get_model("stock.move").create(vals)
number="%s/%s"%(obj.number,line_no)
res=db.get("INSERT INTO stock_move (journal_id,date,ref,product_id,lot_id,location_from_id,location_to_id,qty,uom_id,cost_price,cost_amount,related_id,state,number,cost_fixed,company_id) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'draft',%s,%s,%s) RETURNING id",vals["journal_id"],vals["date"],vals["ref"],vals["product_id"],vals["lot_id"],vals["location_from_id"],vals["location_to_id"],vals["qty"],vals["uom_id"],vals["cost_price"],vals["cost_amount"],vals["related_id"],number,True,obj.company_id.id)
move_id=res.id
move_ids.append(move_id)
t1=time.time()
print(" stock movements created in %.2f s"%(t1-t0))
get_model("stock.move").set_done(move_ids)
print(" stock movements completed")
obj.write({"state": "done"})
def void(self, ids, context={}):
obj = self.browse(ids)[0]
obj.moves.delete()
obj.write({"state": "voided"})
def to_draft(self, ids, context={}):
obj = self.browse(ids)[0]
obj.moves.delete()
obj.write({"state": "draft"})
def copy(self, ids, context={}):
obj = self.browse(ids)[0]
vals = {
"location_id": obj.location_id.id,
"date": obj.date,
"lines": [],
}
for line in obj.lines:
line_vals = {
"product_id": line.product_id.id,
"lot_id": line.lot_id.id,
"bin_location": line.bin_location,
"prev_qty": line.prev_qty,
"new_qty": line.new_qty,
"unit_price": line.unit_price,
"uom_id": line.uom_id.id,
}
vals["lines"].append(("create", line_vals))
new_id = self.create(vals)
new_obj = self.browse(new_id)
return {
"next": {
"name": "stock_count",
"mode": "form",
"active_id": new_id,
},
"flash": "Stock count %s copied from %s" % (new_obj.number, obj.number),
}
def delete(self, ids, **kw):
move_ids = []
for obj in self.browse(ids):
for move in obj.moves:
move_ids.append(move.id)
get_model("stock.move").delete(move_ids)
super().delete(ids, **kw)
def get_total_cost_amount(self,ids,context={}):
vals={}
for obj in self.browse(ids):
total=0
for line in obj.lines:
total+=line.new_cost_amount
vals[obj.id]=total
return vals
StockCount.register()
|
|
import os
import random
import weakref
from aredis import StrictRedis
from aredis.connection import Connection
from aredis.pool import ConnectionPool
from aredis.exceptions import (ConnectionError, ResponseError, ReadOnlyError,
TimeoutError)
from aredis.utils import iteritems, nativestr
class MasterNotFoundError(ConnectionError):
pass
class SlaveNotFoundError(ConnectionError):
pass
class SentinelManagedConnection(Connection):
def __init__(self, **kwargs):
self.connection_pool = kwargs.pop('connection_pool')
super(SentinelManagedConnection, self).__init__(**kwargs)
def __repr__(self):
pool = self.connection_pool
if self.host:
host_info = ',host=%s,port=%s' % (self.host, self.port)
else:
host_info = ''
s = '{}<service={}{}>'.format(type(self).__name__, pool.service_name, host_info)
return s
async def connect_to(self, address):
self.host, self.port = address
await super(SentinelManagedConnection, self).connect()
if self.connection_pool.check_connection:
await self.send_command('PING')
if nativestr(await self.read_response()) != 'PONG':
raise ConnectionError('PING failed')
async def connect(self):
if self._reader and self._writer:
return # already connected
if self.connection_pool.is_master:
await self.connect_to(await self.connection_pool.get_master_address())
else:
for slave in await self.connection_pool.rotate_slaves():
try:
return await self.connect_to(slave)
except ConnectionError:
continue
            raise SlaveNotFoundError  # should never be reached
async def read_response(self):
try:
return await super(SentinelManagedConnection, self).read_response()
except ReadOnlyError:
if self.connection_pool.is_master:
                # When talking to a master, a ReadOnlyError likely
                # indicates that the previous master we're still connected
# to has been demoted to a slave and there's a new master.
# calling disconnect will force the connection to re-query
# sentinel during the next connect() attempt.
self.disconnect()
raise ConnectionError('The previous master is now a slave')
raise
class SentinelConnectionPool(ConnectionPool):
"""
Sentinel backed connection pool.
If ``check_connection`` flag is set to True, SentinelManagedConnection
sends a PING command right after establishing the connection.
"""
def __init__(self, service_name, sentinel_manager, **kwargs):
kwargs['connection_class'] = kwargs.get(
'connection_class', SentinelManagedConnection)
self.is_master = kwargs.pop('is_master', True)
self.check_connection = kwargs.pop('check_connection', False)
super(SentinelConnectionPool, self).__init__(**kwargs)
self.connection_kwargs['connection_pool'] = weakref.proxy(self)
self.service_name = service_name
self.sentinel_manager = sentinel_manager
def __repr__(self):
return "{}<service={}({})".format(
type(self).__name__,
self.service_name,
self.is_master and 'master' or 'slave',
)
def reset(self):
super(SentinelConnectionPool, self).reset()
self.master_address = None
self.slave_rr_counter = None
async def get_master_address(self):
master_address = await self.sentinel_manager.discover_master(
self.service_name)
if self.is_master:
if self.master_address is None:
self.master_address = master_address
elif master_address != self.master_address:
# Master address changed, disconnect all clients in this pool
self.disconnect()
return master_address
async def rotate_slaves(self):
"""Round-robin slave balancer"""
slaves = await self.sentinel_manager.discover_slaves(self.service_name)
slave_address = list()
if slaves:
if self.slave_rr_counter is None:
self.slave_rr_counter = random.randint(0, len(slaves) - 1)
for _ in range(len(slaves)):
self.slave_rr_counter = (self.slave_rr_counter + 1) % len(slaves)
slave_address.append(slaves[self.slave_rr_counter])
return slave_address
# Fallback to the master connection
try:
return [await self.get_master_address()]
except MasterNotFoundError:
pass
raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
def _checkpid(self):
if self.pid != os.getpid():
self.disconnect()
self.reset()
self.__init__(self.service_name, self.sentinel_manager,
is_master=self.is_master,
check_connection=self.check_connection,
connection_class=self.connection_class,
max_connections=self.max_connections,
**self.connection_kwargs)
class Sentinel:
"""
Redis Sentinel cluster client
from aredis.sentinel import Sentinel
sentinel = Sentinel([('localhost', 26379)], stream_timeout=0.1)
async def test():
        master = sentinel.master_for('mymaster', stream_timeout=0.1)
await master.set('foo', 'bar')
        slave = sentinel.slave_for('mymaster', stream_timeout=0.1)
await slave.get('foo')
``sentinels`` is a list of sentinel nodes. Each node is represented by
a pair (hostname, port).
    ``min_other_sentinels`` defines the minimum number of other sentinels a
    sentinel must report. When querying a sentinel, responses from one that
    does not meet this threshold are not considered valid.
``sentinel_kwargs`` is a dictionary of connection arguments used when
connecting to sentinel instances. Any argument that can be passed to
a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
not specified, any stream_timeout and socket_keepalive options specified
in ``connection_kwargs`` will be used.
``connection_kwargs`` are keyword arguments that will be used when
establishing a connection to a Redis server.
"""
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
**connection_kwargs):
# if sentinel_kwargs isn't defined, use the socket_* options from
# connection_kwargs
if sentinel_kwargs is None:
sentinel_kwargs = dict([(k, v)
for k, v in iteritems(connection_kwargs)
if k.startswith('socket_')
])
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [StrictRedis(hostname, port, **self.sentinel_kwargs)
for hostname, port in sentinels]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
sentinel_addresses.append('{}:{}'.format(
sentinel.connection_pool.connection_kwargs['host'],
sentinel.connection_pool.connection_kwargs['port'],
))
return '{}<sentinels=[{}]>'.format(
type(self).__name__,
','.join(sentinel_addresses))
def check_master_state(self, state, service_name):
if not state['is_master'] or state['is_sdown'] or state['is_odown']:
return False
# Check if our sentinel doesn't see other nodes
if state['num-other-sentinels'] < self.min_other_sentinels:
return False
return True
async def discover_master(self, service_name):
"""
Asks sentinel servers for the Redis master's address corresponding
to the service labeled ``service_name``.
Returns a pair (address, port) or raises MasterNotFoundError if no
master is found.
"""
for sentinel_no, sentinel in enumerate(self.sentinels):
try:
masters = await sentinel.sentinel_masters()
except (ConnectionError, TimeoutError):
continue
state = masters.get(service_name)
if state and self.check_master_state(state, service_name):
# Put this sentinel at the top of the list
self.sentinels[0], self.sentinels[sentinel_no] = (
sentinel, self.sentinels[0])
return state['ip'], state['port']
raise MasterNotFoundError("No master found for %r" % (service_name,))
def filter_slaves(self, slaves):
"""Removes slaves that are in an ODOWN or SDOWN state"""
slaves_alive = []
for slave in slaves:
if slave['is_odown'] or slave['is_sdown']:
continue
slaves_alive.append((slave['ip'], slave['port']))
return slaves_alive
async def discover_slaves(self, service_name):
"""Returns a list of alive slaves for service ``service_name``"""
for sentinel in self.sentinels:
try:
slaves = await sentinel.sentinel_slaves(service_name)
except (ConnectionError, ResponseError, TimeoutError):
continue
slaves = self.filter_slaves(slaves)
if slaves:
return slaves
return []
def master_for(self, service_name, redis_class=StrictRedis,
connection_pool_class=SentinelConnectionPool, **kwargs):
"""
Returns a redis client instance for the ``service_name`` master.
A SentinelConnectionPool class is used to retrieve the master's
address before establishing a new connection.
NOTE: If the master's address has changed, any cached connections to
the old master are closed.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs['is_master'] = True
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class(connection_pool=connection_pool_class(
service_name, self, **connection_kwargs))
def slave_for(self, service_name, redis_class=StrictRedis,
connection_pool_class=SentinelConnectionPool, **kwargs):
"""
Returns a redis client instance for the ``service_name`` slave(s).
A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs['is_master'] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class(connection_pool=connection_pool_class(
service_name, self, **connection_kwargs))
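# --- Usage sketch (added for illustration; not part of the upstream module) --
# A minimal example of driving the Sentinel class above from an asyncio event
# loop. The sentinel address ('localhost', 26379) and the monitored service
# name 'mymaster' are assumptions for illustration only. Note that
# master_for()/slave_for() are plain methods returning client instances; only
# the Redis commands themselves are awaited.
if __name__ == '__main__':
    import asyncio

    async def _sentinel_demo():
        sentinel = Sentinel([('localhost', 26379)], stream_timeout=0.1)
        master = sentinel.master_for('mymaster', stream_timeout=0.1)
        await master.set('foo', 'bar')
        replica = sentinel.slave_for('mymaster', stream_timeout=0.1)
        print(await replica.get('foo'))

    asyncio.get_event_loop().run_until_complete(_sentinel_demo())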
|
|
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase
class BytesTests(TranspileTestCase):
def test_setattr(self):
self.assertCodeExecution("""
x = b'hello, world'
try:
x.attr = 42
except AttributeError as err:
print(err)
""")
def test_islower(self):
self.assertCodeExecution("""
print(b'abc'.islower())
print(b''.islower())
print(b'Abccc'.islower())
print(b'HELLO WORD'.islower())
print(b'@#$%!'.islower())
print(b'hello world'.islower())
print(b'hello world '.islower())
# TODO: uncomment when adding support for literal hex bytes
#print(b'\xf0'.islower())
""")
def test_isupper(self):
self.assertCodeExecution("""
print(b'abc'.isupper())
print(b''.isupper())
print(b'Abccc'.isupper())
print(b'HELLO WORD'.isupper())
print(b'@#$%!'.isupper())
print(b'hello world'.isupper())
print(b'hello world '.isupper())
""")
def test_getattr(self):
self.assertCodeExecution("""
x = b'hello, world'
try:
print(x.attr)
except AttributeError as err:
print(err)
""")
def test_capitalize(self):
self.assertCodeExecution(r"""
print(b'hello, world'.capitalize())
print(b'helloWORLD'.capitalize())
print(b'HELLO WORLD'.capitalize())
print(b'2015638687'.capitalize())
print(b'\xc8'.capitalize())
""")
def test_partition(self):
self.assertCodeExecution(r"""
print(b'hello, world'.partition(b','))
print(b'hello, world'.partition(b'h'))
print(b'hello, world'.partition(b'd'))
print(b'hello, world'.partition(b', '))
print(b'hello, world'.partition(b'l'))
print(b'2015638687'.partition(b'a'))
print(b'\xc8'.partition(b' '))
""")
# self.assertCodeExecution(r"""
# print(b'hello, world'.partition(None))
# """, exits_early=True)
def test_repr(self):
self.assertCodeExecution(r"""
print(repr(b'\xc8'))
print(repr(b'abcdef \xc8 abcdef'))
print(repr(b'abcdef \xc8 abcdef\n\r\t'))
print(b'abcdef \xc8 abcdef\n\r\t')
for b in range(0, 256, 16):
print(repr(bytes(range(b, b+16))))
for b in range(0, 256, 16):
print(bytes(range(b, b+16)))
""")
def test_iter(self):
self.assertCodeExecution("""
print([b for b in b''])
print([b for b in b'hello world'])
""")
def test_getitem(self):
self.assertCodeExecution("""
x = b'0123456789'
print("x[0] = ", x[0])
print("x[-10] = ", x[-10])
print("x[9] = ", x[9])
#Start/Stop Empty w/Step +/-
print("x[:] = ", x[:])
print("x[::1] = ", x[::1])
print("x[::2] = ", x[::2])
print("x[::3] = ", x[::3])
print("x[::-1] = ", x[::-1])
print("x[::-2] = ", x[::-2])
print("x[::-3] = ", x[::-3])
#Empty Start Tests With Stop Bounds checks
print("x[:9:1] = ", x[:9:1])
print("x[:10:1] = ", x[:10:1])
print("x[:11:1] = ", x[:11:1])
print("x[:-9:1] = ", x[:-9:1])
print("x[:-10:1] = ", x[:-10:1])
print("x[:-11:1] = ", x[:-11:1])
print("x[:9:2] = ", x[:9:2])
print("x[:10:2] = ", x[:10:2])
print("x[:11:2] = ", x[:11:2])
print("x[:-9:2] = ", x[:-9:2])
print("x[:-10:2] = ", x[:-10:2])
print("x[:-11:2] = ", x[:-11:2])
print("x[:9:3] = ", x[:9:3])
print("x[:10:3] = ", x[:10:3])
print("x[:11:3] = ", x[:11:3])
print("x[:-9:3] = ", x[:-9:3])
print("x[:-10:3] = ", x[:-10:3])
print("x[:-11:3] = ", x[:-11:3])
print("x[:9:-1] = ", x[:9:-1])
print("x[:10:-1] = ", x[:10:-1])
print("x[:11:-1] = ", x[:11:-1])
print("x[:-9:-1] = ", x[:-9:-1])
print("x[:-10:-1] = ", x[:-10:-1])
print("x[:-11:-1] = ", x[:-11:-1])
print("x[:9:-2] = ", x[:9:-2])
print("x[:10:-2] = ", x[:10:-2])
print("x[:11:-2] = ", x[:11:-2])
print("x[:-9:-2] = ", x[:-9:-2])
print("x[:-10:-2] = ", x[:-10:-2])
print("x[:-11:-2] = ", x[:-11:-2])
print("x[:9:-3] = ", x[:9:-3])
print("x[:10:-3] = ", x[:10:-3])
print("x[:11:-3] = ", x[:11:-3])
print("x[:-9:-3] = ", x[:-9:-3])
print("x[:-10:-3] = ", x[:-10:-3])
print("x[:-11:-3] = ", x[:-11:-3])
#Empty stop tests with stop bounds checks
print("x[9::1] = ", x[9::1])
print("x[10::1] = ", x[10::1])
print("x[11::1] = ", x[11::1])
print("x[-9::1] = ", x[-9::1])
print("x[-10::1] = ", x[-10::1])
print("x[-11::1] = ", x[-11::1])
print("x[9::2] = ", x[9::2])
print("x[10::2] = ", x[10::2])
print("x[11::2] = ", x[11::2])
print("x[-9::2] = ", x[-9::2])
print("x[-10::2] = ", x[-10::2])
print("x[-11::2] = ", x[-11::2])
print("x[9::3] = ", x[9::3])
print("x[10::3] = ", x[10::3])
print("x[11::3] = ", x[11::3])
print("x[-9::3] = ", x[-9::3])
print("x[-10::3] = ", x[-10::3])
print("x[-11::3] = ", x[-11::3])
print("x[9::-1] = ", x[9::-1])
print("x[10::-1] = ", x[10::-1])
print("x[11::-1] = ", x[11::-1])
print("x[-9::-1] = ", x[-9::-1])
print("x[-10::-1] = ", x[-10::-1])
print("x[-11::-1] = ", x[-11::-1])
print("x[9::-2] = ", x[9::-2])
print("x[10::-2] = ", x[10::-2])
print("x[11::-2] = ", x[11::-2])
print("x[-9::-2] = ", x[-9::-2])
print("x[-10::-2] = ", x[-10::-2])
print("x[-11::-2] = ", x[-11::-2])
print("x[9::-3] = ", x[9::-3])
print("x[10::-3] = ", x[10::-3])
print("x[11::-3] = ", x[11::-3])
print("x[-9::-3] = ", x[-9::-3])
print("x[-10::-3] = ", x[-10::-3])
print("x[-11::-3] = ", x[-11::-3])
#other tests
print("x[-5:] = ", x[-5:])
print("x[:-5] = ", x[:-5])
print("x[-2:-8] = ", x[-2:-8])
print("x[100::-1] = ", x[100::-1])
print("x[100:-100:-1] = ", x[100:-100:-1])
print("x[:-100:-1] = ", x[:-100:-1])
print("x[::-1] = ", x[::-1])
print("x[::-2] = ", x[::-2])
print("x[::-3] = ", x[::-3])
print("x[:0:-1] = ", x[:0:-1])
print("x[-5::-2] = ", x[-5::-2])
print("x[:-5:-2] = ", x[:-5:-2])
print("x[-2:-8:-2] = ", x[-2:-8:-2])
print("x[0:9] = ", x[0:9])
print("x[0:10:1] = ", x[0:10:1] )
print("x[10:0] = ", x[10:0])
print("x[10:0:-1] = ", x[10:0:-1])
print("x[10:-10:-1] = ", x[10:-10:-1])
print("x[10:-11:-1] = ", x[10:-11:-1])
""")
def test_count(self):
self.assertCodeExecution("""
print(b'abcabca'.count(97))
print(b'abcabca'.count(b'abc'))
print(b'qqq'.count(b'q'))
print(b'qqq'.count(b'qq'))
print(b'qqq'.count(b'qqq'))
print(b'qqq'.count(b'qqqq'))
print(b'abcdefgh'.count(b'bc',-7, -5))
print(b'abcdefgh'.count(b'bc',1, -5))
print(b'abcdefgh'.count(b'bc',0, 3))
print(b'abcdefgh'.count(b'bc',-7, 500))
print(b'qqaqqbqqqcqqqdqqqqeqqqqf'.count(b'qq'),1)
print(b''.count(b'q'),0)
""")
self.assertCodeExecution("""
b'abcabc'.count([]) #Test TypeError invalid byte array
""", exits_early=True)
self.assertCodeExecution("""
b'abcabc'.count(256) #Test ValueError invalid integer range
""", exits_early=True)
self.assertCodeExecution("""
print(b'abcabca'.count(97, [], 3)) #Test Slicing Error on Start
""", exits_early=True)
self.assertCodeExecution("""
print(b'abcabca'.count(97, 3, [])) #Test Slicing Error on End
""", exits_early=True)
def test_find(self):
self.assertCodeExecution("""
print(b''.find(b'a'))
print(b'abcd'.find(b''))
print(b'abcd'.find(b'...'))
print(b'abcd'.find(b'a'))
print(b'abcd'.find(b'b'))
print(b'abcd'.find(b'c'))
print(b'abcd'.find(b'd'))
print(b'abcd'.find(b'ab'))
print(b'abcd'.find(b'bc'))
print(b'abcd'.find(b'cd'))
print(b'abcd'.find(b'cd', 2))
print(b'abcd'.find(b'ab', 3))
print(b'abcd'.find(b'cd', 2, 3))
print(b'abcd'.find(b'ab', 3, 4))
""")
def test_index(self):
self.assertCodeExecution("""
print(b'abcd'.index(b'ab'))
print(b'abcd'.index(b'bc'))
print(b'abcd'.index(b'cd'))
print(b'abcd'.find(b'cd', 2))
print(b'abcd'.find(b'cd', 2, 3))
""")
self.assertCodeExecution("""
print(b''.index(b'a'))
print(b'abcd'.index(b''))
print(b'abcd'.index(b'...'))
print(b'abcd'.find(b'ab', 3))
print(b'abcd'.find(b'ab', 3, 4))
""", exits_early=True)
def test_contains(self):
self.assertCodeExecution("""
print(b'py' in b'pybee')
print(b'bee' in b'pybee')
print(b'ybe' in b'pybee')
print(b'test' in b'pybee')
print(101 in b'pybee')
""")
self.assertCodeExecution("""
print(300 in b'pybee') #Test ValueError invalid integer range
""", exits_early=True)
self.assertCodeExecution("""
print(['b', 'e'] in b'pybee') #Test TypeError invalid byte array
""", exits_early=True)
def test_isalnum(self):
self.assertCodeExecution("""
print(b'w1thnumb3r2'.isalnum())
print(b'withoutnumber'.isalnum())
print(b'with spaces'.isalnum())
print(b'666'.isalnum())
print(b'66.6'.isalnum())
print(b' '.isalnum())
print(b''.isalnum())
print(b'/@. test'.isalnum())
print(b'\x46\x55\x43\x4B'.isalnum())
""")
def test_isalpha(self):
self.assertCodeExecution("""
print(b'testalpha'.isalpha())
print(b'TestAlpha'.isalpha())
print(b'test alpha'.isalpha())
print(b'666'.isalpha())
print(b'66.6'.isalpha())
print(b' '.isalpha())
print(b''.isalpha())
print(b'/@. test'.isalpha())
print(b'\x46\x55\x43\x4B'.isalpha())
""")
def test_isdigit(self):
self.assertCodeExecution("""
print(b'testdigit'.isdigit())
print(b'TestDigit'.isdigit())
print(b'test digit'.isdigit())
print(b'666'.isdigit())
print(b'66.6'.isdigit())
print(b' '.isdigit())
print(b''.isdigit())
print(b'/@. test'.isdigit())
print(b'\x46\x55\x43\x4B'.isdigit())
""")
def test_center(self):
self.assertCodeExecution("""
print(b'pybee'.center(12))
print(b'pybee'.center(13))
print(b'pybee'.center(2))
print(b'pybee'.center(2, b'a'))
print(b'pybee'.center(12, b'a'))
print(b'pybee'.center(13, b'a'))
print(b'pybee'.center(-5))
print(b''.center(5))
print(b'pybee'.center(True, b'a'))
print(b'pybee'.center(True, bytearray(b'a')))
""")
self.assertCodeExecution("""
print(b'pybee'.center('5'))
""", exits_early=True)
self.assertCodeExecution("""
print(b'pybee'.center(12, b'as'))
""", exits_early=True)
self.assertCodeExecution("""
print(b'pybee'.center(12, 'a'))
""", exits_early=True)
def test_upper(self):
self.assertCodeExecution("""
print(b'testupper'.upper())
print(b'TestUpper'.upper())
print(b'test upper'.upper())
print(b'666'.upper())
print(b' '.upper())
print(b''.upper())
print(b'/@. test'.upper())
print(b'\x46\x55\x43\x4B'.upper())
""")
def test_lower(self):
self.assertCodeExecution("""
print(b"abc".lower())
print(b"HELLO WORLD!".lower())
print(b"hElLO wOrLd".lower())
print(b"[Hello] World".lower())
""")
def test_swapcase(self):
self.assertCodeExecution("""
print(b"abc".swapcase())
print(b"ABC".swapcase())
print(b"HELLO WORLD!".swapcase())
print(b"hElLO wOrLd".swapcase())
print(b"[Hello] World".swapcase())
""")
def test_isspace(self):
self.assertCodeExecution("""
print(b'testisspace'.isspace())
print(b'test isspace'.isspace())
print(b' '.isspace())
print(b''.isspace())
print(b' \x46'.isspace())
print(b' \t\t'.isspace())
print(b' \x0b'.isspace())
print(b' \f'.isspace())
print(b' \\n'.isspace())
print(b' \\r'.isspace())
""")
def test_endswith(self):
self.assertCodeExecution("""
print(b"abc".endswith(b"c"))
print(b"abc".endswith(b"bc"))
print(b"abc".endswith(b"d"))
print(b"".endswith(b"a"))
print(b"abc".endswith(b""))
print(b"abcde".endswith(b"de"))
print(b"abcde".endswith(b"de", 2))
print(b"abcde".endswith(b"de", 4))
print(b"abcde".endswith(b"bc", 0, 3))
print(b"abcde".endswith(b"abc", 0, 3))
print(b"abcde".endswith(b"abc", 0, 4))
print(b"abcde".endswith(b"abc", 1, 3))
print(b"abcde".endswith(b"abc", 1, 4))
print(b"abcde".endswith(b"abc", -1, 4))
print(b"abcde".endswith(b"abc", 1, -1))
print(b"abcde".endswith(b"abc", -6, -2))
print(b"abcde".endswith((b"abc",)))
print(b"abcde".endswith((b"abc", b"de")))
print(b"abcde".endswith((b"abc", b"de", b"c")))
print(b"abcde".endswith((b"de", b"d"), -4))
print(b"abcde".endswith((b"de", b"d"), -4, 0))
print(b"abcde".endswith((b"de", b"d"), -4, -1))
print(b"abcde".endswith((b"da", b"aaa"), -4))
""")
def test_startswith(self):
self.assertCodeExecution("""
print(b"abc".startswith(b"a"))
print(b"abc".startswith(b"bc"))
print(b"abc".startswith(b"ab"))
print(b"abc".startswith(b"d"))
print(b"".startswith(b"a"))
print(b"abc".startswith(b""))
print(b"abcde".startswith(b"ab"))
print(b"abcde".startswith(b"de", 3))
print(b"abcde".startswith(b"abc", 3))
print(b"abcde".startswith(b"de", 2))
print(b"abcde".startswith(b"bc", 1, 7))
print(b"abcde".startswith(b"abc", 0, 3))
print(b"abcde".startswith(b"abc", 0, 4))
print(b"abcde".startswith(b"abc", -10, 4))
print(b"abcde".startswith(b"abc", -5, 4))
print(b"abcde".startswith(b"abc", -4, 4))
print(b"abcde".startswith(b"abc", 1, -1))
print(b"abcde".startswith(b"abc", -6, -2))
print(b"abcde".startswith((b"abc",)))
print(b"abcde".startswith((b"abc", b"de")))
print(b"abcde".startswith((b"de",)))
print(b"abcde".startswith((b"de", b"c")))
print(b"abcde".startswith((b"de", b"c", b"b", b"ab")))
print(b"abcde".startswith((b"de", b"d"), -2))
print(b"abcde".startswith((b"de", b"d"), -2, 0))
print(b"abcde".startswith((b"de"), -2))
print(b"abcde".startswith((b"de", b"d"), -2, -1))
print(b"abcde".startswith((b"da", b"aaa"), -4))
""")
def test_strip(self):
self.assertCodeExecution(r"""
print(b"abcde".lstrip())
print(b" abcde".lstrip())
print(b"\n \t abcde".lstrip())
print(b"abcde".rstrip())
print(b"abcde ".rstrip())
print(b"abcde \t \n".rstrip())
print(b"abcde".strip())
print(b"abcde ".strip())
print(b" abcde \t \n".strip())
print(b"".strip())
print(b" \n".strip())
""")
def test_title(self):
self.assertCodeExecution(r"""
print(b"".title())
print(b"abcd".title())
print(b"NOT".title())
print(b"coca cola".title())
print(b"they are from UK, are they not?".title())
print(b'/@.'.title())
print(b'\x46\x55\x43\x4B'.title())
print(b"py.bee".title())
""")
def test_istitle(self):
self.assertCodeExecution(r"""
print(b"".istitle())
print(b"abcd".istitle())
print(b"NOT".istitle())
print(b"coca cola".istitle())
print(b"they are from UK, are they not?".istitle())
print(b'/@.'.istitle())
print(b'\x46\x55\x43\x4B'.istitle())
print(b"py.bee".title())
""")
def test_split(self):
self.assertCodeExecution("""
print(b''.split())
print(b'py bee'.split())
print(b'pyXbXee'.split(b'X'))
print(b'pyXbee'.split(b'z'))
print(b'pyZZbee'.split(b'ZZ'))
print(b'pybebyp'.split(b'e', 1))
print(b'aabaabaa'.split(b'b', 1))
print(b'aabaabaa'.split(b'b', -1))
print(b'one two three'.split(maxsplit=1))
""")
self.assertCodeExecution("""
print(b''.split('a'))
""", exits_early=True)
self.assertCodeExecution("""
print(b'pyXbee'.split('a'))
""", exits_early=True)
self.assertCodeExecution("""
print(b'pyXbee'.split(maxsplit='5'))
""", exits_early=True)
self.assertCodeExecution("""
print(b''.split(maxsplit='5'))
""", exits_early=True)
class UnaryBytesOperationTests(UnaryOperationTestCase, TranspileTestCase):
data_type = 'bytes'
not_implemented = [
]
class BinaryBytesOperationTests(BinaryOperationTestCase, TranspileTestCase):
data_type = 'bytes'
not_implemented = [
'test_direct_eq_bytearray',
'test_direct_eq_none',
'test_direct_ge_bytearray',
'test_direct_ge_none',
'test_direct_gt_bytearray',
'test_direct_gt_none',
'test_direct_le_bytearray',
'test_direct_le_none',
'test_direct_lt_bytearray',
'test_direct_lt_none',
'test_direct_ne_bytearray',
'test_direct_ne_none',
'test_modulo_complex',
'test_modulo_dict',
]
not_implemented_versions = {
'test_modulo_None': (3.5, 3.6),
'test_modulo_NotImplemented': (3.5, 3.6),
'test_modulo_bool': (3.5, 3.6),
'test_modulo_bytearray': (3.5, 3.6),
'test_modulo_bytes': (3.5, 3.6),
'test_modulo_class': (3.5, 3.6),
'test_modulo_float': (3.5, 3.6),
'test_modulo_frozenset': (3.5, 3.6),
'test_modulo_int': (3.5, 3.6),
'test_modulo_list': (3.5, 3.6),
'test_modulo_range': (3.5, 3.6),
'test_modulo_set': (3.5, 3.6),
'test_modulo_slice': (3.5, 3.6),
'test_modulo_str': (3.5, 3.6),
'test_modulo_tuple': (3.5, 3.6),
}
class InplaceBytesOperationTests(InplaceOperationTestCase, TranspileTestCase):
data_type = 'bytes'
not_implemented = [
'test_modulo_complex',
]
not_implemented_versions = {
'test_modulo_None': (3.5, 3.6),
'test_modulo_NotImplemented': (3.5, 3.6),
'test_modulo_bool': (3.5, 3.6),
'test_modulo_bytearray': (3.5, 3.6),
'test_modulo_bytes': (3.5, 3.6),
'test_modulo_class': (3.5, 3.6),
'test_modulo_float': (3.5, 3.6),
'test_modulo_frozenset': (3.5, 3.6),
'test_modulo_int': (3.5, 3.6),
'test_modulo_list': (3.5, 3.6),
'test_modulo_range': (3.5, 3.6),
'test_modulo_set': (3.5, 3.6),
'test_modulo_slice': (3.5, 3.6),
'test_modulo_str': (3.5, 3.6),
'test_modulo_tuple': (3.5, 3.6),
}
is_flakey = [
'test_modulo_dict',
]
|
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Brianer
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Specials.Simulaters.Simulater"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Itemizers import Pointer,Networker
from ShareYourSystem.Standards.Recorders import Recorder,Tracer
#</ImportSpecificModules>
#<DefineLocals>
BrianNeurongroupsTeamKeyStr="Neurongroups"
BrianStatesTeamKeyStr="States"
class StatesClass(Networker.NetworkerClass):pass
#</DefineLocals>
#<DefineClass>
@DecorationClass(**{
'ClassingSwitchMethodStrsList':['brian']
})
class BrianerClass(BaseClass):
def default_init(self,
_BrianingNeurongroupDict=None,
_BrianingTraceDict=None,
_BrianingMoniterTuple=None,
_BrianingSpikesDict=None,
_BrianedNeurongroupVariable=None,
_BrianedTraceKeyStrsList=None,
_BrianedDeriveTracersList=None,
_BrianedParentNeurongroupDeriveBrianerVariable=None,
_BrianedStateMonitersList=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_brian(self):
#/########################/#
# Import brian
# adapt the shape of the BrianingNeurongroupDict
#debug
self.debug(
[
'We brian here',
'We adapt the shape of BrianingNeurongroupDict',
('self.',self,[
'BrianingNeurongroupDict',
'TracingKeyVariable'
])
]
)
#Check
if 'N' not in self.BrianingNeurongroupDict:
self.BrianingNeurongroupDict['N']=self.SimulatingUnitsInt
else:
self.SimulatingUnitsInt=self.BrianingNeurongroupDict['N']
#Check
if 'model' not in self.BrianingNeurongroupDict:
self.BrianingNeurongroupDict['model']=''
#/##################/#
# Set finally the Neurongroup
#
#Check
if self.BrianingNeurongroupDict['model']!="" or self.BrianingNeurongroupDict['N']>0:
#maybe should import
from brian2 import NeuronGroup
#debug
'''
self.debug(
[
'It is a Neurongroup level, we set the Neurongroup',
('self.',self,[
'BrianingNeurongroupDict'
])
]
)
'''
#init
self.BrianedNeurongroupVariable=NeuronGroup(
**self.BrianingNeurongroupDict
)
#debug
'''
self.debug(
[
'Ok we have set the Neurongroup',
('self.',self,[
'BrianedNeurongroupVariable'
])
]
)
'''
#/##################/#
# team Traces first all the brian variables
#
#get
self.BrianedTraceKeyStrsList=self.BrianedNeurongroupVariable.equations._equations.keys()
if len(self.BrianedTraceKeyStrsList)>0:
#debug
'''
self.debug(
[
'We simulate with neurongroup',
'adapt the initial conditions of all the brian variables',
'so first we team Traces and put Tracers inside or get it and mapSet'
]
)
'''
#Check
if 'Traces' not in self.TeamDict:
BrianedDeriveTraces=self.team(
BrianStatesTeamKeyStr
).TeamedValueVariable
else:
BrianedDeriveTraces=self.TeamDict[
BrianStatesTeamKeyStr
]
#map
self.BrianedDeriveTracersList=map(
lambda __ManagementKeyStr,__TraceKeyStr:
BrianedDeriveTraces.manage(
__ManagementKeyStr,
{
'TracingKeyVariable':getattr(
self.BrianedNeurongroupVariable,
__TraceKeyStr
),
'TraceKeyStr':__TraceKeyStr
}
).ManagedValueVariable
if __ManagementKeyStr not in BrianedDeriveTraces.ManagementDict
else BrianedDeriveTraces.ManagementDict[__ManagementKeyStr].mapSet(
{
'TracingKeyVariable':getattr(
self.BrianedNeurongroupVariable,
__TraceKeyStr
),
'TraceKeyStr':__TraceKeyStr
}
),
map(
lambda __BrianedTraceKeyStr:
Tracer.TracerPrefixStr+__BrianedTraceKeyStr,
self.BrianedTraceKeyStrsList
),
self.BrianedTraceKeyStrsList
)
#/##################/#
# We make brian the Tracers
#
#debug
self.debug(
[
'Make brian the tracers',
('self.',self,['BrianedDeriveTracersList'])
]
)
#map
map(
lambda __BrianedDeriveTracer:
__BrianedDeriveTracer.brian(),
self.BrianedDeriveTracersList
)
#debug
self.debug(
[
'Ok the Tracers have brianed',
]
)
"""
#/##################/#
# Now analyze the NeurongroupingStatesDict to set Moniters
#
#debug
'''
self.debug(
[
'We analyze the NeurongroupingStatesDict',
('self.',self,['NeurongroupingStatesDict'])
]
)
'''
#get
NeurongroupedTracesMoniterKeyStrsList=Moniter.MoniterClass.DoingAttributeVariablesOrderedDict.keys()
#map
self.NeurongroupedDeriveMonitersList=SYS.flat(
map(
lambda __DeriveMoniter,__SampleTuplesList:
map(
lambda __SampleTuple:
__DeriveMoniter.manage(
__SampleTuple[0],
SYS.match(
NeurongroupedTracesMoniterKeyStrsList,
__SampleTuple[1:]
)
).ManagedValueVariable,
__SampleTuplesList
),
map(
lambda __KeyStr:
BrianedDeriveTraces.ManagementDict[
Tracer.TracerPrefixStr+__KeyStr
].team('Samples').TeamedValueVariable,
self.NeurongroupingStatesDict.keys()
),
self.NeurongroupingStatesDict.values()
)
)
#/##################/#
# Set Monitors inside
#
#Check
if len(NeurongroupedTracesMoniterKeyStrsList)>0:
#debug
self.debug(
[
'We set the brian monitor inside'
]
)
#import
from brian2 import StateMonitor
#map
self.NeurongroupedDeriveStateMonitorsList=map(
lambda __NeurongroupedDeriveMoniter:
__NeurongroupedDeriveMoniter.set(
'MonitBrianVariable',
StateMonitor(
#NeuronGroup
self.BrianedNeurongroupVariable,
#varname
__NeurongroupedDeriveMoniter.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable.TraceKeyStr,
#record
__NeurongroupedDeriveMoniter.MoniteringLabelIndexIntsArray
)
).MonitBrianVariable,
self.NeurongroupedDeriveMonitersList
)
"""
elif self.TracingKeyVariable is not None:
#debug
self.debug(
[
'It is a Tracer level, we set the Samples',
('self.',self,[
'TracingKeyVariable',
'TraceKeyStr'
])
]
)
#get
self.BrianedParentNeurongroupDeriveBrianerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#/###################/#
# Build the samples and maybe one default moniter
#
#Check
if 'Samples' not in self.TeamDict:
BrianedDeriveSamples=self.team(
'Samples'
).TeamedValueVariable
else:
BrianedDeriveSamples=self.TeamDict[
'Samples'
]
#debug
self.debug(
[
'Do we have to set a default moniter ?',
'len(self.BrianedParentNeurongroupDeriveBrianerVariable.BrianedTraceKeyStrsList) is ',
str(len(self.BrianedParentNeurongroupDeriveBrianerVariable.BrianedTraceKeyStrsList))
]
)
#Check
if len(self.BrianedParentNeurongroupDeriveBrianerVariable.BrianedTraceKeyStrsList)==1:
#Check
if len(BrianedDeriveSamples.ManagementDict)==0:
BrianedDefaultMoniter=BrianedDeriveSamples.manage(
'Default',
).ManagedValueVariable
BrianedDefaultMoniter.MoniteringLabelIndexIntsArray=[0] if self.BrianedParentNeurongroupDeriveBrianerVariable.BrianingNeurongroupDict[
'N']>0 else []
#/###################/#
# We make brian the Moniters
#
#debug
self.debug(
[
'We make brian the moniters',
'BrianedDeriveSamples.ManagementDict.keys() is ',
str(BrianedDeriveSamples.ManagementDict.keys())
]
)
#map
map(
lambda __DeriveMoniter:
__DeriveMoniter.brian(),
BrianedDeriveSamples.ManagementDict.values()
)
#/###################/#
# We trace and set to the brian value
#
#debug
self.debug(
[
'We trace and alias the init in the brian object',
('self.',self,['TracingKeyVariable'])
]
)
#trace
self.trace()
#debug
self.debug(
[
'We have traced, alias the init in the brian object',
('self.',self,[
'TracedValueFloatsArray',
'TracedInitFloatsArray'
])
]
)
#alias
self.TracedValueFloatsArray[:]=self.TracedInitFloatsArray*self.TracedValueFloatsArray.unit
#debug
self.debug(
[
('self.',self,['TracedValueFloatsArray'])
]
)
def propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable):
#/##################/#
# Call the parent method
#
#set
BaseClass.propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable)
#/##################/#
# brian
#
#brian
self.brian()
#</DefineClass>
#<DefineLocals>
#set
BrianerClass.TeamingClassesDict.update(
{
'States':StatesClass,
'Samples':StatesClass
}
)
StatesClass.ManagingValueClass=BrianerClass
#</DefineLocals>
#<DefinePrint>
BrianerClass.PrintingClassSkipKeyStrsList.extend(
[
'BrianingNeurongroupDict',
'BrianingTraceDict',
'BrianingSpikesDict',
'BrianedNeurongroupVariable',
'BrianedTraceKeyStrsList',
'BrianedDeriveTracersList',
'BrianedStateMonitersList'
]
)
#</DefinePrint>
|
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
#
# License: BSD, (C) INRIA, University of Amsterdam
import warnings
import numpy as np
from scipy.sparse import csr_matrix, issparse
from scipy.spatial.ckdtree import cKDTree
from .ball_tree import BallTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..utils import safe_asarray, atleast2d_or_csr
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning warnings are always displayed, not just once
warnings.simplefilter("always", NeighborsWarning)
def warn_equidistant():
msg = ("kneighbors: neighbor k+1 and neighbor k have the same "
"distance: results will be dependent on data order.")
warnings.warn(msg, NeighborsWarning, stacklevel=3)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(BaseEstimator):
"""Base class for nearest neighbors estimators."""
#FIXME: include float parameter p for using different distance metrics.
# this can be passed directly to BallTree and cKDTree. Brute-force will
# rely on soon-to-be-updated functionality in the pairwise module.
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30,
warn_on_equidistant=True, p=2):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.warn_on_equidistant = warn_on_equidistant
self.p = p
if algorithm not in ['auto', 'brute', 'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if p < 1:
raise ValueError("p must be greater than or equal to 1")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, cKDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = safe_asarray(X)
if X.ndim != 2:
raise ValueError("data type not understood")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
self._fit_X = X.tocsr()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# BallTree outperforms the others in nearly any circumstance.
if self.n_neighbors is None:
self._fit_method = 'ball_tree'
elif self.n_neighbors < self._fit_X.shape[0] // 2:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'kd_tree':
self._tree = cKDTree(X, self.leaf_size)
elif self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size, p=self.p)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
instance from an array representing our data set and ask which point
is closest to [1, 1, 1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
if self.p == 1:
dist = pairwise_distances(X, self._fit_X, 'manhattan')
elif self.p == 2:
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
elif self.p == np.inf:
dist = pairwise_distances(X, self._fit_X, 'chebyshev')
else:
dist = pairwise_distances(X, self._fit_X, 'minkowski',
p=self.p)
# XXX: should be implemented with a partial sort
neigh_ind = dist.argsort(axis=1)
if self.warn_on_equidistant and n_neighbors < self._fit_X.shape[0]:
ii = np.arange(dist.shape[0])
ind_k = neigh_ind[:, n_neighbors - 1]
ind_k1 = neigh_ind[:, n_neighbors]
if np.any(dist[ii, ind_k] == dist[ii, ind_k1]):
warn_equidistant()
neigh_ind = neigh_ind[:, :n_neighbors]
if return_distance:
j = np.arange(neigh_ind.shape[0])[:, None]
if self.p == 2:
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method == 'ball_tree':
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
if self.warn_on_equidistant and self._tree.warning_flag:
warn_equidistant()
return result
elif self._fit_method == 'kd_tree':
dist, ind = self._tree.query(X, n_neighbors, p=self.p)
# kd_tree returns a 1D array for n_neighbors = 1
if n_neighbors == 1:
dist = dist[:, None]
ind = ind[:, None]
if return_distance:
return dist, ind
else:
return ind
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = np.asarray(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
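# --- CSR layout sketch (added; illustrative only) ----------------------------
# The connectivity branch above builds the CSR triplet directly: every row has
# exactly n_neighbors non-zeros, so indptr is a simple arange. A self-contained
# illustration with made-up neighbor indices (k = 2, 3 samples):
def _demo_knn_csr():
    k, n = 2, 3
    ind = np.array([[1, 2], [0, 2], [0, 1]])     # neighbor indices per row
    data = np.ones((n, k))                       # edge weights (all ones)
    indptr = np.arange(0, n * k + 1, k)          # [0, 2, 4, 6]
    return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n, n))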
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors of a point within a given radius.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
instance from an array representing our data set and ask which points
lie within a radius of 1.6 of [1, 1, 1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Because the number of neighbors of each point is not necessarily
equal, `radius_neighbors` returns an array of objects, where each
object is a 1D array of indices.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
if self.p == 1:
dist = pairwise_distances(X, self._fit_X, 'manhattan')
elif self.p == 2:
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
elif self.p == np.inf:
dist = pairwise_distances(X, self._fit_X, 'chebyshev')
else:
dist = pairwise_distances(X, self._fit_X, 'minkowski',
p=self.p)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.p == 2:
dist = np.array([np.sqrt(d[neigh_ind[i]]) \
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]] \
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method == 'ball_tree':
if return_distance:
ind, dist = self._tree.query_radius(X, radius,
return_distance=True)
return dist, ind
else:
ind = self._tree.query_radius(X, radius,
return_distance=False)
return ind
elif self._fit_method == 'kd_tree':
Npts = self._fit_X.shape[0]
dist, ind = self._tree.query(X, Npts,
distance_upper_bound=radius,
p=self.p)
ind = [ind_i[:ind_i.searchsorted(Npts)] for ind_i in ind]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
ind = np.asarray(ind, dtype=int)
dtype_F = float
except ValueError:
ind = np.asarray(ind, dtype='object')
dtype_F = object
if return_distance:
dist = np.array([dist_i[:len(ind[i])]
for i, dist_i in enumerate(dist)],
dtype=dtype_F)
return dist, ind
else:
return ind
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = np.asarray(X)
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree}
Training data. If array or matrix, then the shape
is [n_samples, n_features]
y : {array-like, sparse matrix}, shape = [n_samples]
Target values, array of float values.
"""
self._y = np.asarray(y)
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree}
Training data. If array or matrix, then the shape
is [n_samples, n_features]
y : {array-like, sparse matrix}, shape = [n_samples]
Target values, array of integer values.
"""
self._y = np.asarray(y)
self._classes = np.sort(np.unique(y))
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
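# --- Composition sketch (added; illustrative only) ---------------------------
# The base class and mixins above are intended to be combined into concrete
# estimators elsewhere in sklearn.neighbors. A minimal, hypothetical
# composition and query could look like this; the class name and the sample
# data are assumptions for illustration, not part of this module.
class _DemoNearestNeighbors(NeighborsBase, KNeighborsMixin,
                            RadiusNeighborsMixin, UnsupervisedMixin):
    def __init__(self, n_neighbors=5, radius=1.0, algorithm='auto',
                 leaf_size=30, p=2):
        self._init_params(n_neighbors=n_neighbors, radius=radius,
                          algorithm=algorithm, leaf_size=leaf_size, p=p)

if __name__ == '__main__':
    X = np.array([[0., 0.], [0., 1.], [1., 1.]])
    nn = _DemoNearestNeighbors(n_neighbors=2).fit(X)
    # Returns (distances, indices) of the two nearest training points.
    print(nn.kneighbors(np.array([[0., 0.2]])))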
|
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Determine genomic ranges to perform local assembly."""
from third_party.nucleus.protos import reads_pb2
from third_party.nucleus.util import ranges
from deepvariant.protos import deepvariant_pb2
from deepvariant.protos import realigner_pb2
from deepvariant.python import allelecounter
from deepvariant.realigner.python import window_selector as cpp_window_selector
def _candidates_from_reads(config, ref_reader, reads, region):
"""Returns a list of candidate positions.
Args:
config: learning.genomics.deepvariant.realigner.WindowSelectorOptions
options determining the behavior of this window selector.
ref_reader: GenomeReference. Indexed reference genome to query bases.
reads: list[nucleus.protos.Read]. The reads we are processing into candidate
positions.
region: nucleus.protos.Range. The region we are processing.
Returns:
A list. The elements are reference positions within region.
Raises:
ValueError: if config.window_selector_model.model_type isn't a valid enum
name in realigner_pb2.WindowSelectorModel.ModelType.
"""
allele_counter_options = deepvariant_pb2.AlleleCounterOptions(
read_requirements=reads_pb2.ReadRequirements(
min_mapping_quality=config.min_mapq,
min_base_quality=config.min_base_quality),
keep_legacy_behavior=config.keep_legacy_behavior)
expanded_region = ranges.expand(
region,
config.region_expansion_in_bp,
contig_map=ranges.contigs_dict(ref_reader.header.contigs))
allele_counter = allelecounter.AlleleCounter(ref_reader.c_reader,
expanded_region, [],
allele_counter_options)
for read in reads:
allele_counter.add(read, 'placeholder_sample_id')
model_type = config.window_selector_model.model_type
if model_type == realigner_pb2.WindowSelectorModel.VARIANT_READS:
return _variant_reads_threshold_selector(
allele_counter, config.window_selector_model.variant_reads_model,
expanded_region)
elif model_type == realigner_pb2.WindowSelectorModel.ALLELE_COUNT_LINEAR:
return _allele_count_linear_selector(
allele_counter, config.window_selector_model.allele_count_linear_model,
expanded_region)
else:
raise ValueError('Unknown enum option "{}" for '
'WindowSelectorModel.model_type'.format(
config.window_selector_model.model_type))
def _variant_reads_threshold_selector(allele_counter, model_conf,
expanded_region):
"""Returns a list of candidate positions.
Following cigar operations generate candidate position:
- ALIGNMENT_MATCH, SEQUENCE_MISMATCH, SEQUENCE_MATCH: at mismatch positions
in the read when compared to the reference sequence.
- DELETE: at positions within [cigar_start, cigar_start + cigar_len)
- INSERT, CLIP_SOFT: at positions within
[cigar_start - cigar_len, cigar_start + cigar_len)
Note. Function implementation has changed to return positions beyond input
region in case we have variants there. See the change at internal and
internal.
Args:
allele_counter: learning.genomics.deepvariant.realigner.AlleleCounter in the
considered region.
model_conf: learning.genomics.deepvariant.realigner
.WindowSelectorOptions.VariantReadsThresholdModel options determining the
behavior of this window selector.
expanded_region: nucleus.protos.Range. The region we are processing.
Returns:
A list. The elements are reference positions within region.
"""
counts_vec = cpp_window_selector.variant_reads_candidates_from_allele_counter(
allele_counter)
return [
expanded_region.start + i
for i, count in enumerate(counts_vec)
if (count >= model_conf.min_num_supporting_reads and
count <= model_conf.max_num_supporting_reads)
]
def _allele_count_linear_selector(allele_counter, model_conf, expanded_region):
"""Returns a list of candidate positions.
Candidate positions for realignment are generated by scoring each location.
The score at a location is a weighted sum of the number of reads with each
CIGAR operation at the location, where the weights are determined by the model
coefficients. Locations whose score exceed the model decision boundary value
are used to create realignment windows.
Note. Function implementation has changed to return positions beyond input
region in case we have variants there. See the change at internal and
internal.
Args:
allele_counter: learning.genomics.deepvariant.realigner.AlleleCounter in the
considered region.
model_conf: learning.genomics.deepvariant.realigner
.WindowSelectorOptions.AlleleCountLinearModel options determining the
behavior of this window selector.
expanded_region: nucleus.protos.Range. The region we are processing.
Returns:
A list. The elements are reference positions within region.
"""
scores_vec = (
cpp_window_selector.allele_count_linear_candidates_from_allele_counter(
allele_counter, model_conf))
return [
expanded_region.start + i
for i, score in enumerate(scores_vec)
if score > model_conf.decision_boundary
]
def _candidates_to_windows(config, candidate_pos, ref_name):
""""Process candidate positions to determine windows for local assembly.
Windows are within range of
[min(pos) - config.min_windows_distance,
max(pos) + config.min_windows_distance)
Args:
config: learning.genomics.deepvariant.realigner.WindowSelectorOptions
options determining the behavior of this window selector.
candidate_pos: A list of ref_pos.
ref_name: Reference name, used in setting the output
genomics.range.reference_name value.
Returns:
A sorted list of nucleus.protos.Range protos for all windows in this region.
"""
windows = []
def _add_window(start_pos, end_pos):
windows.append(
ranges.make_range(ref_name, start_pos - config.min_windows_distance,
end_pos + config.min_windows_distance))
start_pos, end_pos = None, None
for pos in sorted(candidate_pos):
if start_pos is None:
start_pos = pos
end_pos = pos
# We need to check if the previous end_pos is within 2*window_distance as we
# generate a window of radius window_distance around each position.
#
# <-------end_pos------->
# <-------pos------->
# where window_distance = ------->
#
# If this is the case, we need to merge the two windows.
elif pos > end_pos + 2 * config.min_windows_distance:
_add_window(start_pos, end_pos)
start_pos = pos
end_pos = pos
else:
end_pos = pos
if start_pos is not None:
_add_window(start_pos, end_pos)
return sorted(windows, key=ranges.as_tuple)
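# --- Merging sketch (added; illustrative only) -------------------------------
# A self-contained rendition of the window-merging rule implemented by
# _candidates_to_windows above, using plain (start, end) tuples instead of
# nucleus Range protos. The candidate positions and min_windows_distance=2 are
# made-up values: adjacent candidates are merged into one window unless they
# lie more than 2 * min_windows_distance apart.
def _demo_merge_candidates(candidate_pos=(5, 6, 14), min_windows_distance=2):
    windows = []
    start_pos, end_pos = None, None
    for pos in sorted(candidate_pos):
        if start_pos is None:
            start_pos, end_pos = pos, pos
        elif pos > end_pos + 2 * min_windows_distance:
            windows.append((start_pos - min_windows_distance,
                            end_pos + min_windows_distance))
            start_pos, end_pos = pos, pos
        else:
            end_pos = pos
    if start_pos is not None:
        windows.append((start_pos - min_windows_distance,
                        end_pos + min_windows_distance))
    return windows  # -> [(3, 8), (12, 16)] for the defaults above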
def select_windows(config, ref_reader, reads, region):
""""Process reads to determine candidate windows for local assembly.
Windows are within range of
[0 - config.min_windows_distance, ref_len + config.min_windows_distance)
Args:
config: learning.genomics.deepvariant.realigner.WindowSelectorOptions
options determining the behavior of this window selector.
ref_reader: GenomeReference. Indexed reference genome to query bases.
reads: A list of genomics.Read records.
region: nucleus.protos.Range. The region we are processing.
Returns:
A list of nucleus.protos.Range protos sorted by their genomic position.
"""
# This is a fast path for the case where we have no reads, so we have no
# windows to assemble.
if not reads:
return []
candidates = _candidates_from_reads(config, ref_reader, reads, region)
return _candidates_to_windows(config, candidates, region.reference_name)
|
|
"""The tests for Alarm control panel device triggers."""
import pytest
from homeassistant.components.alarm_control_panel import DOMAIN
import homeassistant.components.automation as automation
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a alarm_control_panel."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
"alarm_control_panel.test_5678", "attributes", {"supported_features": 15}
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "disarmed",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "triggered",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "arming",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "armed_home",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "armed_away",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "armed_night",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_PENDING)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "triggered",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"triggered - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "disarmed",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"disarmed - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "armed_home",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"armed_home - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "armed_away",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"armed_away - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "alarm_control_panel.entity",
"type": "armed_night",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"armed_night - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
]
},
)
# Fake that the entity is triggered.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_TRIGGERED)
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== "triggered - device - alarm_control_panel.entity - pending - triggered - None"
)
# Fake that the entity is disarmed.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_DISARMED)
await hass.async_block_till_done()
assert len(calls) == 2
assert (
calls[1].data["some"]
== "disarmed - device - alarm_control_panel.entity - triggered - disarmed - None"
)
# Fake that the entity is armed home.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_HOME)
await hass.async_block_till_done()
assert len(calls) == 3
assert (
calls[2].data["some"]
== "armed_home - device - alarm_control_panel.entity - disarmed - armed_home - None"
)
# Fake that the entity is armed away.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_AWAY)
await hass.async_block_till_done()
assert len(calls) == 4
assert (
calls[3].data["some"]
== "armed_away - device - alarm_control_panel.entity - armed_home - armed_away - None"
)
# Fake that the entity is armed night.
hass.states.async_set("alarm_control_panel.entity", STATE_ALARM_ARMED_NIGHT)
await hass.async_block_till_done()
assert len(calls) == 5
assert (
calls[4].data["some"]
== "armed_night - device - alarm_control_panel.entity - armed_away - armed_night - None"
)
|
|
"""
webencodings.labels
~~~~~~~~~~~~~~~~~~~
Map encoding labels to their name.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
# XXX Do not edit!
# This file is automatically generated by mklabels.py
LABELS = {
'unicode-1-1-utf-8': 'utf-8',
'utf-8': 'utf-8',
'utf8': 'utf-8',
'866': 'ibm866',
'cp866': 'ibm866',
'csibm866': 'ibm866',
'ibm866': 'ibm866',
'csisolatin2': 'iso-8859-2',
'iso-8859-2': 'iso-8859-2',
'iso-ir-101': 'iso-8859-2',
'iso8859-2': 'iso-8859-2',
'iso88592': 'iso-8859-2',
'iso_8859-2': 'iso-8859-2',
'iso_8859-2:1987': 'iso-8859-2',
'l2': 'iso-8859-2',
'latin2': 'iso-8859-2',
'csisolatin3': 'iso-8859-3',
'iso-8859-3': 'iso-8859-3',
'iso-ir-109': 'iso-8859-3',
'iso8859-3': 'iso-8859-3',
'iso88593': 'iso-8859-3',
'iso_8859-3': 'iso-8859-3',
'iso_8859-3:1988': 'iso-8859-3',
'l3': 'iso-8859-3',
'latin3': 'iso-8859-3',
'csisolatin4': 'iso-8859-4',
'iso-8859-4': 'iso-8859-4',
'iso-ir-110': 'iso-8859-4',
'iso8859-4': 'iso-8859-4',
'iso88594': 'iso-8859-4',
'iso_8859-4': 'iso-8859-4',
'iso_8859-4:1988': 'iso-8859-4',
'l4': 'iso-8859-4',
'latin4': 'iso-8859-4',
'csisolatincyrillic': 'iso-8859-5',
'cyrillic': 'iso-8859-5',
'iso-8859-5': 'iso-8859-5',
'iso-ir-144': 'iso-8859-5',
'iso8859-5': 'iso-8859-5',
'iso88595': 'iso-8859-5',
'iso_8859-5': 'iso-8859-5',
'iso_8859-5:1988': 'iso-8859-5',
'arabic': 'iso-8859-6',
'asmo-708': 'iso-8859-6',
'csiso88596e': 'iso-8859-6',
'csiso88596i': 'iso-8859-6',
'csisolatinarabic': 'iso-8859-6',
'ecma-114': 'iso-8859-6',
'iso-8859-6': 'iso-8859-6',
'iso-8859-6-e': 'iso-8859-6',
'iso-8859-6-i': 'iso-8859-6',
'iso-ir-127': 'iso-8859-6',
'iso8859-6': 'iso-8859-6',
'iso88596': 'iso-8859-6',
'iso_8859-6': 'iso-8859-6',
'iso_8859-6:1987': 'iso-8859-6',
'csisolatingreek': 'iso-8859-7',
'ecma-118': 'iso-8859-7',
'elot_928': 'iso-8859-7',
'greek': 'iso-8859-7',
'greek8': 'iso-8859-7',
'iso-8859-7': 'iso-8859-7',
'iso-ir-126': 'iso-8859-7',
'iso8859-7': 'iso-8859-7',
'iso88597': 'iso-8859-7',
'iso_8859-7': 'iso-8859-7',
'iso_8859-7:1987': 'iso-8859-7',
'sun_eu_greek': 'iso-8859-7',
'csiso88598e': 'iso-8859-8',
'csisolatinhebrew': 'iso-8859-8',
'hebrew': 'iso-8859-8',
'iso-8859-8': 'iso-8859-8',
'iso-8859-8-e': 'iso-8859-8',
'iso-ir-138': 'iso-8859-8',
'iso8859-8': 'iso-8859-8',
'iso88598': 'iso-8859-8',
'iso_8859-8': 'iso-8859-8',
'iso_8859-8:1988': 'iso-8859-8',
'visual': 'iso-8859-8',
'csiso88598i': 'iso-8859-8-i',
'iso-8859-8-i': 'iso-8859-8-i',
'logical': 'iso-8859-8-i',
'csisolatin6': 'iso-8859-10',
'iso-8859-10': 'iso-8859-10',
'iso-ir-157': 'iso-8859-10',
'iso8859-10': 'iso-8859-10',
'iso885910': 'iso-8859-10',
'l6': 'iso-8859-10',
'latin6': 'iso-8859-10',
'iso-8859-13': 'iso-8859-13',
'iso8859-13': 'iso-8859-13',
'iso885913': 'iso-8859-13',
'iso-8859-14': 'iso-8859-14',
'iso8859-14': 'iso-8859-14',
'iso885914': 'iso-8859-14',
'csisolatin9': 'iso-8859-15',
'iso-8859-15': 'iso-8859-15',
'iso8859-15': 'iso-8859-15',
'iso885915': 'iso-8859-15',
'iso_8859-15': 'iso-8859-15',
'l9': 'iso-8859-15',
'iso-8859-16': 'iso-8859-16',
'cskoi8r': 'koi8-r',
'koi': 'koi8-r',
'koi8': 'koi8-r',
'koi8-r': 'koi8-r',
'koi8_r': 'koi8-r',
'koi8-u': 'koi8-u',
'csmacintosh': 'macintosh',
'mac': 'macintosh',
'macintosh': 'macintosh',
'x-mac-roman': 'macintosh',
'dos-874': 'windows-874',
'iso-8859-11': 'windows-874',
'iso8859-11': 'windows-874',
'iso885911': 'windows-874',
'tis-620': 'windows-874',
'windows-874': 'windows-874',
'cp1250': 'windows-1250',
'windows-1250': 'windows-1250',
'x-cp1250': 'windows-1250',
'cp1251': 'windows-1251',
'windows-1251': 'windows-1251',
'x-cp1251': 'windows-1251',
'ansi_x3.4-1968': 'windows-1252',
'ascii': 'windows-1252',
'cp1252': 'windows-1252',
'cp819': 'windows-1252',
'csisolatin1': 'windows-1252',
'ibm819': 'windows-1252',
'iso-8859-1': 'windows-1252',
'iso-ir-100': 'windows-1252',
'iso8859-1': 'windows-1252',
'iso88591': 'windows-1252',
'iso_8859-1': 'windows-1252',
'iso_8859-1:1987': 'windows-1252',
'l1': 'windows-1252',
'latin1': 'windows-1252',
'us-ascii': 'windows-1252',
'windows-1252': 'windows-1252',
'x-cp1252': 'windows-1252',
'cp1253': 'windows-1253',
'windows-1253': 'windows-1253',
'x-cp1253': 'windows-1253',
'cp1254': 'windows-1254',
'csisolatin5': 'windows-1254',
'iso-8859-9': 'windows-1254',
'iso-ir-148': 'windows-1254',
'iso8859-9': 'windows-1254',
'iso88599': 'windows-1254',
'iso_8859-9': 'windows-1254',
'iso_8859-9:1989': 'windows-1254',
'l5': 'windows-1254',
'latin5': 'windows-1254',
'windows-1254': 'windows-1254',
'x-cp1254': 'windows-1254',
'cp1255': 'windows-1255',
'windows-1255': 'windows-1255',
'x-cp1255': 'windows-1255',
'cp1256': 'windows-1256',
'windows-1256': 'windows-1256',
'x-cp1256': 'windows-1256',
'cp1257': 'windows-1257',
'windows-1257': 'windows-1257',
'x-cp1257': 'windows-1257',
'cp1258': 'windows-1258',
'windows-1258': 'windows-1258',
'x-cp1258': 'windows-1258',
'x-mac-cyrillic': 'x-mac-cyrillic',
'x-mac-ukrainian': 'x-mac-cyrillic',
'chinese': 'gbk',
'csgb2312': 'gbk',
'csiso58gb231280': 'gbk',
'gb2312': 'gbk',
'gb_2312': 'gbk',
'gb_2312-80': 'gbk',
'gbk': 'gbk',
'iso-ir-58': 'gbk',
'x-gbk': 'gbk',
'gb18030': 'gb18030',
'hz-gb-2312': 'hz-gb-2312',
'big5': 'big5',
'big5-hkscs': 'big5',
'cn-big5': 'big5',
'csbig5': 'big5',
'x-x-big5': 'big5',
'cseucpkdfmtjapanese': 'euc-jp',
'euc-jp': 'euc-jp',
'x-euc-jp': 'euc-jp',
'csiso2022jp': 'iso-2022-jp',
'iso-2022-jp': 'iso-2022-jp',
'csshiftjis': 'shift_jis',
'ms_kanji': 'shift_jis',
'shift-jis': 'shift_jis',
'shift_jis': 'shift_jis',
'sjis': 'shift_jis',
'windows-31j': 'shift_jis',
'x-sjis': 'shift_jis',
'cseuckr': 'euc-kr',
'csksc56011987': 'euc-kr',
'euc-kr': 'euc-kr',
'iso-ir-149': 'euc-kr',
'korean': 'euc-kr',
'ks_c_5601-1987': 'euc-kr',
'ks_c_5601-1989': 'euc-kr',
'ksc5601': 'euc-kr',
'ksc_5601': 'euc-kr',
'windows-949': 'euc-kr',
'csiso2022kr': 'iso-2022-kr',
'iso-2022-kr': 'iso-2022-kr',
'utf-16be': 'utf-16be',
'utf-16': 'utf-16le',
'utf-16le': 'utf-16le',
'x-user-defined': 'x-user-defined',
}
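# Illustrative lookup (not part of the generated table): callers are expected
# to strip ASCII whitespace from a label and lowercase it before the lookup,
# e.g.
#
#     LABELS.get('  UTF-8 '.strip(' \t\n\f\r').lower())   # -> 'utf-8'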
|
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import errno
from pycalico import netns
from pycalico.ipam import IPAMClient
from pycalico.datastore import Rules, Rule
from pycalico.block import AlreadyAssignedError
from pycalico.datastore_errors import DataStoreError
from netaddr import IPAddress, AddrFormatError
import json
import logging
import logging.handlers
import traceback
import re
from subprocess import check_output, CalledProcessError
from netaddr import IPNetwork
import socket
LOGFILE = "/var/log/calico/isolator.log"
ORCHESTRATOR_ID = "mesos"
ERROR_MISSING_COMMAND = "Missing command"
ERROR_MISSING_CONTAINER_ID = "Missing container_id"
ERROR_MISSING_HOSTNAME = "Missing hostname"
ERROR_MISSING_PID = "Missing pid"
ERROR_UNKNOWN_COMMAND = "Unknown command: %s"
ERROR_MISSING_ARGS = "Missing args"
datastore = IPAMClient()
_log = logging.getLogger("CALICOMESOS")
HOSTNAME = socket.gethostname()
def calico_mesos():
"""
Module function which parses JSON from stdin and calls the appropriate
plugin function. Input JSON data looks like the following:
{
"command": "allocate|isolate|reserve|cleanup",
"args": {}
}
Args will vary depending on which function is called. See the docstring
of each of the listed command functions for accepted args.
:return:
"""
stdin_raw_data = sys.stdin.read()
_log.info("Received request: %s" % stdin_raw_data)
# Convert input data to JSON object
try:
stdin_json = json.loads(stdin_raw_data)
except ValueError as e:
raise IsolatorException(str(e))
# Extract command
try:
command = stdin_json['command']
except KeyError:
raise IsolatorException(ERROR_MISSING_COMMAND)
# Extract args
try:
args = stdin_json['args']
except KeyError:
raise IsolatorException(ERROR_MISSING_ARGS)
# Labels are passed in as JSONified protobufs, which look like the following:
# {
# "args": {
# ...
# "labels": [
# { "key": "mykey1", "value": "myvalue1" },
# { "key": "mykey2", "value": "myvalue2" }
# ]
# }
# }
#
# Update them to a more pythonic representation:
# {
# "args": {
# ...
# "labels": {
# "mykey1": "myvalue1",
# "mykey2": "myvalue2"
# }
# }
# }
labels = args.get("labels")
if labels:
args['labels'] = {label['key']: label['value'] for label in labels}
_log.info("Fixed request to be: %s" % str(args))
# Call command with args
_log.debug("Executing %s" % command)
if command == 'isolate':
return isolate(args)
elif command == 'cleanup':
return cleanup(args)
elif command == 'allocate':
return allocate(args)
elif command == 'reserve':
return reserve(args)
elif command == 'release':
return release(args)
else:
raise IsolatorException(ERROR_UNKNOWN_COMMAND % command)
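# Illustrative request this module expects on stdin (values are hypothetical;
# see the individual command functions' docstrings for the accepted args):
#
#     {"command": "allocate",
#      "args": {"hostname": "slave-0-1",
#               "uid": "0cd47986-24ad-4c00-b9d3-5db9e5c02028",
#               "num_ipv4": 1, "num_ipv6": 0}}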
def _setup_logging(logfile):
# Ensure directory exists.
try:
os.makedirs(os.path.dirname(LOGFILE))
except OSError as oserr:
if oserr.errno != errno.EEXIST:
raise
_log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(process)d [%(levelname)s] '
'%(name)s %(lineno)d: %(message)s')
handler = logging.handlers.TimedRotatingFileHandler(logfile,
when='D',
backupCount=10)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
_log.addHandler(handler)
def _validate_ip_addrs(ip_addrs, ip_version=None):
if not isinstance(ip_addrs, list):
raise IsolatorException(
"IP addresses must be provided as JSON list, not: %s" %
type(ip_addrs))
validated_ip_addrs = []
for ip_addr in ip_addrs:
try:
ip = IPAddress(ip_addr)
except AddrFormatError:
raise IsolatorException("IP address could not be parsed: %s" %
ip_addr)
if ip_version and ip.version != ip_version:
raise IsolatorException("IPv%d address must not be placed in IPv%d"
" address field." %
(ip.version, ip_version))
else:
validated_ip_addrs.append(ip)
return validated_ip_addrs
def _create_profile_for_host_communication(profile_name):
"""
Create a profile which allows traffic to and from the host.
"""
_log.info("Autocreating profile %s", profile_name)
datastore.create_profile(profile_name)
prof = datastore.get_profile(profile_name)
host_net = str(_get_host_ip_net())
_log.info("adding accept rule for %s" % host_net)
allow_from_slave = Rule(action="allow", src_net=host_net)
allow_to_slave = Rule(action="allow", dst_net=host_net)
prof.rules = Rules(id=profile_name,
inbound_rules=[allow_from_slave],
outbound_rules=[allow_to_slave])
datastore.profile_update_rules(prof)
def _create_profile_for_netgroup(profile_name):
"""
Create a profile which allows traffic from other Endpoints in the same
profile.
"""
_log.info("Autocreating profile %s", profile_name)
datastore.create_profile(profile_name)
prof = datastore.get_profile(profile_name)
allow_from_profile = Rule(action="allow", src_tag=profile_name)
allow_to_all = Rule(action="allow")
prof.rules = Rules(id=profile_name,
inbound_rules=[allow_from_profile],
outbound_rules=[allow_to_all])
datastore.profile_update_rules(prof)
def _create_profile_for_public_communication(profile_name):
"""
Create a public profile which allows open traffic from all.
"""
_log.info("Creating public profile: %s", profile_name)
datastore.create_profile(profile_name)
prof = datastore.get_profile(profile_name)
allow_all = Rule(action="allow")
prof.rules = Rules(id=profile_name,
inbound_rules=[allow_all],
outbound_rules=[allow_all])
datastore.profile_update_rules(prof)
def _get_host_ip_net():
"""
Gets the IP Address of the host.
Ignores Loopback and docker0 Addresses.
"""
IP_SUBNET_RE = re.compile(r'inet ((?:\d+\.){3}\d+\/\d+)')
INTERFACE_SPLIT_RE = re.compile(r'(\d+:.*(?:\n\s+.*)+)')
IFACE_RE = re.compile(r'^\d+: (\S+):')
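    # Illustrative `ip -4 addr` output the regexes above are written against
    # (example values, shown for reference only):
    #   2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP
    #       inet 10.240.0.12/24 brd 10.240.0.255 scope global eth0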
# Call `ip addr`.
try:
ip_addr_output = check_output(["ip", "-4", "addr"])
    except (CalledProcessError, OSError):
raise IsolatorException("Could not read host IP")
# Separate interface blocks from ip addr output and iterate.
for iface_block in INTERFACE_SPLIT_RE.findall(ip_addr_output):
# Exclude certain interfaces.
match = IFACE_RE.match(iface_block)
if match and match.group(1) not in ["docker0", "lo"]:
# Iterate through Addresses on interface.
for address in IP_SUBNET_RE.findall(iface_block):
ip_net = IPNetwork(address)
if not ip_net.ip.is_loopback():
# Select just the host IP address, not the entire subnet
                    # it belongs to, since we only want this host to communicate
# with the executor.
return IPNetwork(ip_net.ip)
raise IsolatorException("Couldn't determine host's IP Address.")
def isolate(args):
"""
Toplevel function which validates and sanitizes json args into variables
which can be passed to _isolate.
"args": {
"hostname": "slave-H3A-1", # Required
"container_id": "ba11f1de-fc4d-46fd-9f15-424f4ef05a3a", # Required
"ipv4_addrs": ["192.168.23.4"], # Not Required
"ipv6_addrs": ["2001:3ac3:f90b:1111::1"], # Not Required
"netgroups": ["prod", "frontend"], # Required.
"labels": { # Optional.
"rack": "3A",
"pop": "houston"
}
"""
hostname = args.get("hostname")
container_id = args.get("container_id")
pid = args.get("pid")
ipv4_addrs = args.get("ipv4_addrs", [])
ipv6_addrs = args.get("ipv6_addrs", [])
netgroups = args.get("netgroups", [])
labels = args.get("labels")
# Validate Container ID
if not container_id:
raise IsolatorException(ERROR_MISSING_CONTAINER_ID)
if not hostname:
raise IsolatorException(ERROR_MISSING_HOSTNAME)
if not pid:
raise IsolatorException(ERROR_MISSING_PID)
# Validate IPv4 Addresses
ipv4_addrs_validated = _validate_ip_addrs(ipv4_addrs, 4)
# Validate IPv6 Addresses
ipv6_addrs_validated = _validate_ip_addrs(ipv6_addrs, 6)
if not ipv4_addrs_validated + ipv6_addrs_validated:
raise IsolatorException("Must provide at least one IPv4 or IPv6 address.")
# Validate that netgroups are present
if not isinstance(netgroups, list):
raise IsolatorException("Must provide list of netgroups.")
_isolate(hostname, pid, container_id, ipv4_addrs_validated, ipv6_addrs_validated, netgroups, labels)
_log.debug("Request completed.")
def _isolate(hostname, ns_pid, container_id, ipv4_addrs, ipv6_addrs, profiles, labels):
"""
Configure networking for a container.
This function performs the following steps:
1.) Create endpoint in memory
2.) Fill endpoint with data
3.) Configure network to match the filled endpoint's specifications
4.) Write endpoint to etcd
:param hostname: Hostname of the slave which the container is running on
:param container_id: The container's ID
:param ipv4_addrs: List of desired IPv4 addresses to be assigned to the endpoint
:param ipv6_addrs: List of desired IPv6 addresses to be assigned to the endpoint
:param profiles: List of desired profiles to be assigned to the endpoint
:param labels: TODO
:return: None
"""
_log.info("Preparing network for Container with ID %s", container_id)
_log.info("IP: %s, Profile %s", ipv4_addrs, profiles)
# Exit if the endpoint has already been configured
if len(datastore.get_endpoints(hostname=HOSTNAME,
orchestrator_id=ORCHESTRATOR_ID,
workload_id=container_id)) == 1:
raise IsolatorException("This container has already been configured "
"with Calico Networking.")
# Create the endpoint
ep = datastore.create_endpoint(hostname=HOSTNAME,
orchestrator_id=ORCHESTRATOR_ID,
workload_id=container_id,
ip_list=ipv4_addrs)
# Create any profiles in etcd that do not already exist
assigned_profiles = []
_log.info("Assigning Profiles: %s" % profiles)
# First remove any keyword profile names
try:
profiles.remove("public")
except ValueError:
pass
else:
_log.info("Assigning Public Profile")
if not datastore.profile_exists("public"):
_create_profile_for_public_communication("public")
assigned_profiles.append("public")
# Assign remaining netgroup profiles
for profile in profiles:
profile = "ng_%s" % profile
if not datastore.profile_exists(profile):
_log.info("Assigning Netgroup Profile: %s" % profile)
_create_profile_for_netgroup(profile)
assigned_profiles.append(profile)
# Insert the host-communication profile
default_profile_name = "default_%s" % hostname
_log.info("Assigning Default Host Profile: %s" % default_profile_name)
if not datastore.profile_exists(default_profile_name):
_create_profile_for_host_communication(default_profile_name)
assigned_profiles.insert(0, default_profile_name)
# Call through to complete the network setup matching this endpoint
ep.profile_ids = assigned_profiles
try:
ep.mac = ep.provision_veth(netns.PidNamespace(ns_pid), "eth0")
except netns.NamespaceError as e:
raise IsolatorException(e.message)
datastore.set_endpoint(ep)
_log.info("Finished networking for container %s", container_id)
def cleanup(args):
hostname = args.get("hostname")
container_id = args.get("container_id")
if not container_id:
raise IsolatorException(ERROR_MISSING_CONTAINER_ID)
if not hostname:
raise IsolatorException(ERROR_MISSING_HOSTNAME)
_cleanup(hostname, container_id)
def _cleanup(hostname, container_id):
_log.info("Cleaning executor with Container ID %s.", container_id)
try:
endpoint = datastore.get_endpoint(hostname=HOSTNAME,
orchestrator_id=ORCHESTRATOR_ID,
workload_id=container_id)
except KeyError:
raise IsolatorException("No endpoint found with container-id: %s" % container_id)
# Release IP addresses.
ips = {net.ip for net in endpoint.ipv4_nets | endpoint.ipv6_nets}
_log.info("%s | Release IPs %s", container_id, ips)
datastore.release_ips(ips)
# Remove the endpoint
_log.info("Removing veth for endpoint %s", endpoint.endpoint_id)
datastore.remove_endpoint(endpoint)
# Remove the container from the datastore.
datastore.remove_workload(hostname=HOSTNAME,
orchestrator_id=ORCHESTRATOR_ID,
workload_id=container_id)
_log.info("Cleanup complete for container %s", container_id)
def reserve(args):
"""
Toplevel function which validates and sanitizes dictionary of args
which can be passed to _reserve. Calico's reserve does not make use of
netgroups or labels, so they are ignored.
"args": {
"hostname": "slave-0-1", # Required
# At least one of "ipv4_addrs" and "ipv6_addrs" must be present.
"ipv4_addrs": ["192.168.23.4"],
"ipv6_addrs": ["2001:3ac3:f90b:1111::1", "2001:3ac3:f90b:1111::2"],
"uid": "0cd47986-24ad-4c00-b9d3-5db9e5c02028",
"netgroups": ["prod", "frontend"], # Optional.
"labels": { # Optional.
"rack": "3A",
"pop": "houston"
}
}
"""
hostname = args.get("hostname")
ipv4_addrs = args.get("ipv4_addrs", [])
ipv6_addrs = args.get("ipv6_addrs", [])
uid = args.get("uid")
# Validations
if not uid:
raise IsolatorException("Missing uid")
try:
# Convert to string since libcalico requires uids to be strings
uid = str(uid)
except ValueError:
raise IsolatorException("Invalid UID: %s" % uid)
if hostname is None:
raise IsolatorException(ERROR_MISSING_HOSTNAME)
# Validate IP addresses
ipv4_addrs_validated = _validate_ip_addrs(ipv4_addrs, 4)
ipv6_addrs_validated = _validate_ip_addrs(ipv6_addrs, 6)
if not ipv4_addrs_validated + ipv6_addrs_validated:
raise IsolatorException("Must provide at least one IPv4 or IPv6 address.")
return _reserve(hostname, uid, ipv4_addrs_validated, ipv6_addrs_validated)
def _reserve(hostname, uid, ipv4_addrs, ipv6_addrs):
"""
Reserve an IP from the IPAM.
:param hostname: The host agent which is reserving this IP
:param uid: A unique ID, which is indexed by the IPAM module and can be
used to release all addresses with the uid.
:param ipv4_addrs: List of IPAddress objects representing requested IPv4
addresses.
:param ipv6_addrs: List of IPAddress objects representing requested IPv6
addresses.
:return:
"""
_log.info("Reserving. hostname: %s, uid: %s, ipv4_addrs: %s, ipv6_addrs: %s" % \
(HOSTNAME, uid, ipv4_addrs, ipv6_addrs))
assigned_ips = []
try:
for ip_addr in ipv4_addrs + ipv6_addrs:
datastore.assign_ip(ip_addr, uid, {}, host=HOSTNAME)
assigned_ips.append(ip_addr)
            # Keep track of successfully assigned ip_addrs in case we need to rollback
except (RuntimeError, ValueError, AlreadyAssignedError):
failed_addr = ip_addr
_log.error("Couldn't reserve %s. Attempting rollback." % (ip_addr))
# Rollback assigned ip_addrs
datastore.release_ips(set(assigned_ips))
raise IsolatorException("IP '%s' already in use." % failed_addr)
def allocate(args):
"""
Toplevel function which validates and sanitizes json args into variables
which can be passed to _allocate.
args = {
"hostname": "slave-0-1", # Required
"num_ipv4": 1, # Required.
"num_ipv6": 2, # Required.
"uid": "0cd47986-24ad-4c00-b9d3-5db9e5c02028", # Required
"netgroups": ["prod", "frontend"], # Optional.
"labels": { # Optional.
"rack": "3A",
"pop": "houston"
}
}
"""
hostname = args.get("hostname")
uid = args.get("uid")
num_ipv4 = args.get("num_ipv4")
num_ipv6 = args.get("num_ipv6")
netgroups = args.get("netgroups")
labels = args.get("labels", {})
# Validations
if not uid:
raise IsolatorException("Missing uid")
try:
# Convert to string since libcalico requires uids to be strings
uid = str(uid)
except ValueError:
raise IsolatorException("Invalid UID: %s" % uid)
if hostname is None:
raise IsolatorException(ERROR_MISSING_HOSTNAME)
if num_ipv4 is None:
raise IsolatorException("Missing num_ipv4")
if num_ipv6 is None:
raise IsolatorException("Missing num_ipv6")
if not isinstance(num_ipv4, (int, long)):
try:
num_ipv4 = int(num_ipv4)
        except (TypeError, ValueError):
raise IsolatorException("num_ipv4 must be an integer")
if not isinstance(num_ipv6, (int, long)):
try:
num_ipv6 = int(num_ipv6)
        except (TypeError, ValueError):
raise IsolatorException("num_ipv6 must be an integer")
# Check if the user has requested a specific IP via label
# Note: this will be deprecated once marathon provides the ipv4_addrs
# field which will trigger netmodules' 'reserve'.
# This implementation replaces the requested IPAM IP with the static IP.
# i.e. If a user requests 1 IP, and specifies one specific IP in the
# ipv4_addr label, they will receive 1 IP back - the one they specified.
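    # The label value is expected to be a string holding a Python-style list
    # literal, e.g. (illustrative): labels = {"ipv4_addrs": "['192.168.23.4']"}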
ipv4_addrs = []
if labels.has_key("ipv4_addrs"):
try:
ipv4_addrs = eval(labels['ipv4_addrs'])
except SyntaxError:
raise IsolatorException("Calico detected a malformed ipv4_addrs "
"field. Ensure you've specified a string representation "
"of a list of strings.")
# 'reserve' will sanitize the IP Addresses for us
reserve({"hostname": hostname,
"ipv4_addrs": ipv4_addrs,
"ipv6_addrs": [],
"uid": uid,
"netgroups": netgroups,
"labels": labels})
# Decrement how many IPAM'd IPs they will be getting by how many they
# requested by label.
num_ipv4 = max(num_ipv4 - len(ipv4_addrs), 0)
result = _allocate(num_ipv4, num_ipv6, hostname, uid)
result['ipv4'] += ipv4_addrs
return json.dumps(result)
def _allocate(num_ipv4, num_ipv6, hostname, uid):
"""
Allocate IP addresses from the data store.
:param num_ipv4: Number of IPv4 addresses to request.
:param num_ipv6: Number of IPv6 addresses to request.
:param hostname: The hostname of this host.
:param uid: A unique ID, which is indexed by the IPAM module and can be
used to release all addresses with the uid.
:return: Dictionary of the result in the following format:
{
"ipv4": ["192.168.23.4"],
"ipv6": ["2001:3ac3:f90b:1111::1", "2001:3ac3:f90b:1111::2"],
"error": None # Not None indicates error and contains error message.
}
"""
result = datastore.auto_assign_ips(num_ipv4, num_ipv6, uid, {},
host=HOSTNAME)
ipv4_strs = [str(ip) for ip in result[0]]
ipv6_strs = [str(ip) for ip in result[1]]
return {"ipv4": ipv4_strs,
"ipv6": ipv6_strs,
"error": None}
def release(args):
"""
Toplevel function which validates and sanitizes json args into variables
which can be passed to _release_uid or _release_ips.
args: {
"uid": "0cd47986-24ad-4c00-b9d3-5db9e5c02028",
# OR
"ips": ["192.168.23.4", "2001:3ac3:f90b:1111::1"] # OK to mix 6 & 4
}
Must include a uid or ips, but not both. If a uid is passed, release all
addresses with that uid.
If a list of ips is passed, release those IPs.
"""
uid = args.get("uid")
ips = args.get("ips")
if uid is None:
if ips is None:
raise IsolatorException("Must supply either uid or ips.")
else:
ips_validated = _validate_ip_addrs(ips)
return _release_ips(set(ips_validated))
else:
# uid supplied.
if ips is not None:
raise IsolatorException("Supply either uid or ips, not both.")
else:
if not isinstance(uid, (str, unicode)):
raise IsolatorException("uid must be a string")
# uid validated.
return _release_uid(uid)
def _release_ips(ips):
"""
Release the given IPs using the data store.
:param ips: Set of IPAddress objects to release.
:return: None
"""
# release_ips returns a set of addresses that were already not allocated
# when this function was called. But, Mesos doesn't consume that
# information, so we ignore it.
_ = datastore.release_ips(ips)
def _release_uid(uid):
"""
Release all IP addresses with the given unique ID using the data store.
:param uid: The unique ID used to allocate the IPs.
:return: None
"""
_ = datastore.release_ip_by_handle(uid)
def _error_message(msg=None):
"""
Helper function to convert error messages into the JSON format.
"""
return json.dumps({"error": msg})
class IsolatorException(Exception):
pass
if __name__ == '__main__':
_setup_logging(LOGFILE)
try:
response = calico_mesos()
except IsolatorException as e:
_log.error(e)
sys.stdout.write(_error_message(str(e)))
sys.exit(1)
    except DataStoreError as e:
# Encountered an etcd error
_log.error(e)
# Try to give a more helpful error message depending on whether or not
# the user has set ETCD_AUTHORITY
try:
etcd_authority = os.environ['ETCD_AUTHORITY']
error_message = "Failed to communicate with etcd at '%s'. " \
"Ensure that it is up and running or change ETCD_AUTHORITY" \
" environment variable used by the meoss-agent process." % \
etcd_authority
except KeyError:
error_message = "Failed to communicate with etcd. ETCD_AUTHORITY " \
"is not set for this agent process."
sys.stdout.write(_error_message(error_message))
sys.exit(1)
except Exception as e:
_log.error(e)
sys.stdout.write(_error_message("Unhandled error %s\n%s" %
(str(e), traceback.format_exc())))
sys.exit(1)
else:
        if response is None:
response = _error_message(None)
_log.info("Request completed with response: %s" % response)
sys.stdout.write(response)
sys.exit(0)
|
|
from abc import ABC
from collections import namedtuple
from functools import partial
import os
from unittest.mock import patch
from nativeconfig.exceptions import DeserializationError, ValidationError
from test import StubConfig
Option = namedtuple('OptionName', [
'option_type',
'value',
'alternate_value',
'invalid_value',
'invalid_json_value',
'invalid_raw_value'
])
def make_option_type(option_type, **kwargs):
t = partial(option_type, **kwargs)
t.__doc__ = option_type.__doc__
return t
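# Illustrative (hypothetical) fixture for a concrete test case that mixes in
# OptionMixin below; real test cases define OPTIONS per option type:
#
#     class TestStringOption(OptionMixin, unittest.TestCase):
#         OPTIONS = [
#             Option(option_type=make_option_type(StringOption),
#                    value='a', alternate_value='b', invalid_value=42,
#                    invalid_json_value='42', invalid_raw_value=None),
#         ]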
class OptionMixin(ABC):
OPTIONS = None
OPTION_ENV_NAME = 'NATIVECONFIG_OPTION'
def tearDown(self):
os.environ.pop(self.OPTION_ENV_NAME, None)
def test_name_must_be_nonempty_string(self):
for o in self.OPTIONS:
with self.assertRaises(ValueError):
class MyConfig(StubConfig):
option = o.option_type(name=None)
with self.assertRaises(ValueError):
class MyConfig(StubConfig):
option = o.option_type(name='')
with self.assertRaises(ValueError):
class MyConfig(StubConfig):
option = o.option_type(name=42)
class MyConfig(StubConfig):
option = o.option_type(name='_')
def test_env_name_must_be_nonempty_string_if_set(self):
for o in self.OPTIONS:
with self.assertRaises(ValueError):
class MyConfig(StubConfig):
option = o.option_type('_', env_name='')
with self.assertRaises(ValueError):
class MyConfig(StubConfig):
option = o.option_type('_', env_name=42)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME)
def test_choices_must_be_nonempty_iterable_if_set(self):
for o in self.OPTIONS:
with self.assertRaises(ValueError):
class MyConfig(StubConfig):
option = o.option_type('_', choices=[])
with self.assertRaises(ValueError):
class MyConfig(StubConfig):
option = o.option_type('_', choices=42)
class MyConfig(StubConfig):
option = o.option_type('_', choices=[o.value])
def test_choices_deep_copied(self):
for o in self.OPTIONS:
choices = [o.value]
class MyConfig(StubConfig):
option = o.option_type('_', choices=choices)
choices[0] = o.alternate_value
self.assertEqual(MyConfig.option.choices, [o.value])
def test_choices_property_returns_deep_copy(self):
for o in self.OPTIONS:
option = o.option_type('_', choices=[o.value, o.alternate_value])
choices = option.choices
self.assertEqual(choices, option._choices)
choices.pop()
self.assertNotEqual(choices, option._choices)
self.assertEqual(choices, [o.value])
self.assertEqual(option._choices, [o.value, o.alternate_value])
def test_doc_passed_from_constructor(self):
doc_string = 'Test option'
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_', doc=doc_string)
self.assertEqual(MyConfig.option.__doc__, doc_string)
self.assertEqual(MyConfig.get_instance().option_for_name('_').__doc__, doc_string)
def test_doc_inherited_if_not_passed(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
self.assertEqual(MyConfig.option.__doc__, o.option_type.__doc__)
self.assertEqual(MyConfig.get_instance().option_for_name('_').__doc__, o.option_type.__doc__)
def test_default_must_be_in_choices_if_set(self):
for o in self.OPTIONS:
with self.assertRaises(ValidationError):
class MyConfig(StubConfig):
option = o.option_type('_', default=o.value, choices=[o.alternate_value])
class MyConfig(StubConfig):
option = o.option_type('_', default=o.value, choices=[o.value])
def test_choices_must_be_valid_if_set(self):
for o in self.OPTIONS:
with self.assertRaises(ValidationError):
class MyConfig(StubConfig):
option = o.option_type('_', choices=[o.invalid_value])
def test_default_must_be_valid_if_set(self):
for o in self.OPTIONS:
with self.assertRaises(ValidationError):
class MyConfig(StubConfig):
option = o.option_type('_', default=o.invalid_value)
def test_serialized_value_can_be_deserialized(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
raw_value = MyConfig.option.serialize(o.value)
python_value = MyConfig.option.deserialize(raw_value)
self.assertEqual(python_value, o.value)
def test_json_serialized_value_can_be_deserialized(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
json_value = MyConfig.option.serialize_json(o.value)
python_value = MyConfig.option.deserialize_json(json_value)
self.assertEqual(python_value, o.value)
def test_serialize_json_to_None(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
self.assertEqual(MyConfig.option.serialize_json(None), 'null')
def test_deserialize_json_from_None(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
self.assertEqual(MyConfig.option.deserialize_json('null'), None)
def test_setting_value(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
c.option = o.alternate_value
self.assertEqual(c.option, o.alternate_value)
def test_value_must_be_valid(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
with self.assertRaises(ValidationError):
c.option = o.invalid_value
def test_None_is_always_invalid(self):
for o in self.OPTIONS:
option = o.option_type('_')
with self.assertRaises(ValidationError):
option.validate(None)
def test_value_must_be_in_choices_if_set(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_', choices=[o.value])
c = MyConfig.get_instance()
with self.assertRaises(ValidationError):
c.option = o.alternate_value
with self.assertRaises(ValidationError):
c.set_json_value_for_option_name('_', MyConfig.option.serialize_json(o.alternate_value))
with self.assertRaises(ValidationError):
c.set_raw_value_for_option_name('_', MyConfig.option.serialize(o.alternate_value))
def test_setting_one_shot_value(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
c.set_one_shot_value_for_option_name('_', o.alternate_value)
self.assertEqual(c.option, o.alternate_value)
self.assertEqual(getattr(c, MyConfig.option._getter)('_'), MyConfig.option.serialize(o.value))
def test_one_shot_value_must_be_valid(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
with self.assertRaises(ValidationError):
c.set_one_shot_value_for_option_name('_', o.invalid_value)
def test_one_shot_value_must_be_in_choices_if_set(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_', choices=[o.value])
c = MyConfig.get_instance()
with self.assertRaises(ValidationError):
c.set_one_shot_value_for_option_name('_', o.alternate_value)
with self.assertRaises(ValidationError):
c.set_one_shot_json_value_for_option_name('_', MyConfig.option.serialize_json(o.alternate_value))
with self.assertRaises(ValidationError):
c.set_one_shot_raw_value_for_option_name('_', MyConfig.option.serialize(o.alternate_value))
def test_setting_env_value(self):
for o in self.OPTIONS:
os.environ.pop(self.OPTION_ENV_NAME, None)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME)
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
os.environ[self.OPTION_ENV_NAME] = MyConfig.option.serialize_json(o.alternate_value)
self.assertEqual(c.option, o.alternate_value)
def test_env_value_must_be_valid(self):
for o in self.OPTIONS:
os.environ.pop(self.OPTION_ENV_NAME, None)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME)
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
os.environ[self.OPTION_ENV_NAME] = o.invalid_json_value
with self.assertRaises(DeserializationError):
c.option
def test_env_value_must_be_in_choices_if_set(self):
for o in self.OPTIONS:
os.environ.pop(self.OPTION_ENV_NAME, None)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME, choices=[o.value])
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
os.environ[self.OPTION_ENV_NAME] = MyConfig.option.serialize_json(o.alternate_value)
with self.assertRaises(ValidationError):
c.option
def test_deleting_value_resets_to_default(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_', default=o.value)
c = MyConfig.get_instance()
c.option = o.alternate_value
self.assertEqual(c.option, o.alternate_value)
c.option = None
self.assertEqual(c.option, o.value)
c.option = o.alternate_value
self.assertEqual(c.option, o.alternate_value)
del c.option
self.assertEqual(c.option, o.value)
def test_deleting_value_resets_one_shot_value(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_')
c = MyConfig.get_instance()
c.option = o.value
self.assertEqual(c.option, o.value)
c.set_one_shot_value_for_option_name('_', o.alternate_value)
self.assertEqual(c.option, o.alternate_value)
del c.option
self.assertIsNone(c.option)
def test_deleting_value_preserves_env_if_set(self):
for o in self.OPTIONS:
os.environ.pop(self.OPTION_ENV_NAME, None)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME)
c = MyConfig.get_instance()
os.environ[self.OPTION_ENV_NAME] = MyConfig.option.serialize_json(o.value)
self.assertEqual(c.option, o.value)
del c.option
self.assertEqual(c.option, o.value)
def test_value_that_cannot_be_deserialized_calls_resolver(self):
for o in self.OPTIONS:
if o.invalid_raw_value is None:
continue
class MyConfig(StubConfig):
option = o.option_type('_')
c = MyConfig.get_instance()
getattr(c, MyConfig.option._setter)('_', o.invalid_raw_value)
with self.assertRaises(DeserializationError):
c.option
with patch.object(StubConfig, 'resolve_value', return_value='unresolved'):
self.assertEqual(c.option, 'unresolved')
getattr(c, MyConfig.option._setter)('_', MyConfig.option.serialize(o.value))
self.assertEqual(c.option, o.value)
def test_invalid_deserialized_value_calls_resolver(self):
for o in self.OPTIONS:
class MyConfig(StubConfig):
option = o.option_type('_', choices=[o.value])
c = MyConfig.get_instance()
getattr(c, MyConfig.option._setter)('_', MyConfig.option.serialize(o.alternate_value))
with self.assertRaises(ValidationError):
c.option
with patch.object(StubConfig, 'resolve_value', return_value='unresolved'):
self.assertEqual(c.option, 'unresolved')
getattr(c, MyConfig.option._setter)('_', MyConfig.option.serialize(o.value))
self.assertEqual(c.option, o.value)
def test_env_value_that_cannot_be_deserialized_calls_resolver(self):
for o in self.OPTIONS:
os.environ.pop(self.OPTION_ENV_NAME, None)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME)
c = MyConfig.get_instance()
os.environ[self.OPTION_ENV_NAME] = o.invalid_json_value
with self.assertRaises(DeserializationError):
c.option
with patch.object(StubConfig, 'resolve_value', return_value='unresolved'):
self.assertEqual(c.option, 'unresolved')
os.environ[self.OPTION_ENV_NAME] = MyConfig.option.serialize_json(o.value)
self.assertEqual(c.option, o.value)
os.environ[self.OPTION_ENV_NAME] = '['
with self.assertRaises(DeserializationError):
c.option
with patch.object(StubConfig, 'resolve_value', return_value='unresolved'):
self.assertEqual(c.option, 'unresolved')
os.environ[self.OPTION_ENV_NAME] = MyConfig.option.serialize_json(o.value)
self.assertEqual(c.option, o.value)
def test_invalid_deserialized_env_value_calls_resolver(self):
for o in self.OPTIONS:
os.environ.pop(self.OPTION_ENV_NAME, None)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME, choices=[o.value])
c = MyConfig.get_instance()
os.environ[self.OPTION_ENV_NAME] = MyConfig.option.serialize_json(o.alternate_value)
with self.assertRaises(ValidationError):
c.option
with patch.object(StubConfig, 'resolve_value', return_value='unresolved'):
self.assertEqual(c.option, 'unresolved')
os.environ[self.OPTION_ENV_NAME] = MyConfig.option.serialize_json(o.value)
self.assertEqual(c.option, o.value)
def test_use_default_when_env_value_is_None(self):
for o in self.OPTIONS:
os.environ.pop(self.OPTION_ENV_NAME, None)
class MyConfig(StubConfig):
option = o.option_type('_', env_name=self.OPTION_ENV_NAME, default=o.value)
c = MyConfig.get_instance()
getattr(c, MyConfig.option._setter)('_', MyConfig.option.serialize(o.alternate_value))
self.assertEqual(c.option, o.alternate_value)
os.environ[self.OPTION_ENV_NAME] = 'null'
self.assertEqual(c.option, o.value)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a convolutional network for NVIDIA GPU (NNVM)
=========================================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole convolutional
network for NVIDIA GPU.
The operator implementation for NVIDIA GPU in TVM is written in template form.
The template has many tunable knobs (tile factor, unrolling, etc).
We will tune all convolution and depthwise convolution operators
in the neural network. After tuning, we produce a log file which stores
the best knob values for all required operators. When the tvm compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some NVIDIA GPUs. You can go to
`NVIDIA GPU Benchmark <https://github.com/dmlc/tvm/wiki/Benchmark#nvidia-gpu>`_
to see the results.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado
#
# To make tvm run faster during tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute:
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
import numpy as np
import nnvm.testing
import nnvm.compiler
import tvm
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
#################################################################
# Define Network
# --------------
# First we need to define the network in nnvm symbol API.
# We can load some pre-defined network from :code:`nnvm.testing`.
# We can also load models from MXNet, ONNX and TensorFlow (see NNVM
# tutorials :ref:`tutorial-nnvm` for more details).
def get_network(name, batch_size):
"""Get the symbol definition and random weight of a network"""
input_shape = (batch_size, 3, 224, 224)
output_shape = (batch_size, 1000)
if "resnet" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size)
elif name == 'mobilenet':
net, params = nnvm.testing.mobilenet.get_workload(batch_size=batch_size)
elif name == 'squeezenet_v1.1':
net, params = nnvm.testing.squeezenet.get_workload(batch_size=batch_size, version='1.1')
elif name == 'inception_v3':
input_shape = (1, 3, 299, 299)
net, params = nnvm.testing.inception_v3.get_workload(batch_size=batch_size)
elif name == 'custom':
# an example for custom network
from nnvm.testing import utils
net = nnvm.sym.Variable('data')
net = nnvm.sym.conv2d(net, channels=4, kernel_size=(3,3), padding=(1,1))
net = nnvm.sym.flatten(net)
net = nnvm.sym.dense(net, units=1000)
net, params = utils.create_workload(net, batch_size, (3, 224, 224))
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
net, params = nnvm.frontend.from_mxnet(block)
net = nnvm.sym.softmax(net)
else:
raise ValueError("Unsupported network: " + name)
return net, params, input_shape, output_shape
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we apply some configurations.
#### DEVICE CONFIG ####
target = tvm.target.cuda()
#### TUNING OPTION ####
network = 'resnet-18'
log_file = "%s.log" % network
dtype = 'float32'
tuning_option = {
'log_filename': log_file,
'tuner': 'xgb',
'n_trial': 2000,
'early_stopping': 600,
'measure_option': autotvm.measure_option(
builder=autotvm.LocalBuilder(timeout=10),
runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150),
),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default value provided here works well.
#
#   If you have a large time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
#   which makes the tuning run longer.
#
# If you have multiple devices, you can use all of them for measurement to
#   accelerate the tuning process (see the 'Scale up measurement' section below).
#
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(tasks,
measure_option,
tuner='xgb',
n_trial=1000,
early_stopping=None,
log_filename='tuning.log',
use_transfer_learning=True,
try_winograd=True):
if try_winograd:
for i in range(len(tasks)):
try: # try winograd template
tsk = autotvm.task.create(tasks[i].name, tasks[i].args,
tasks[i].target, tasks[i].target_host, 'winograd')
input_channel = tsk.workload[1][1]
if input_channel >= 64:
tasks[i] = tsk
except Exception:
pass
# create tmp log file
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " %(i+1, len(tasks))
# create tuner
if tuner == 'xgb' or tuner == 'xgb-rank':
tuner_obj = XGBTuner(tsk, loss_type='rank')
elif tuner == 'ga':
tuner_obj = GATuner(tsk, pop_size=100)
elif tuner == 'random':
tuner_obj = RandomTuner(tsk)
elif tuner == 'gridsearch':
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
# do tuning
tuner_obj.tune(n_trial=min(n_trial, len(tsk.config_space)),
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(n_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file)])
# pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
# extract workloads from nnvm graph
print("Extract tasks...")
net, params, input_shape, out_shape = get_network(network, batch_size=1)
tasks = autotvm.task.extract_from_graph(net, target=target,
shape={'data': input_shape}, dtype=dtype,
symbols=(nnvm.sym.conv2d,))
# run tuning tasks
print("Tuning...")
tune_tasks(tasks, **tuning_opt)
# compile kernels with history best records
with autotvm.apply_history_best(log_file):
print("Compile...")
with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(
net, target=target, shape={'data': input_shape}, params=params, dtype=dtype)
# export library
tmp = tempdir()
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
# load parameters
ctx = tvm.context(str(target), 0)
module = runtime.create(graph, lib, ctx)
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input('data', data_tvm)
module.set_input(**params)
# evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", ctx, number=1, repeat=600)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
(np.mean(prof_res), np.std(prof_res)))
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract features from them.
# So a high performance CPU is recommended. One sample output is listed below.
# It takes about 4 hours to get the following output on a 32T AMD Ryzen Threadripper.
# The tuning target is NVIDIA 1080 Ti.
# (You can see some errors during compilation. If the tuning is not stuck, it is okay.)
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/12] Current/Best: 541.83/3570.66 GFLOPS | Progress: (960/2000) | 1001.31 s Done.
# [Task 2/12] Current/Best: 0.56/ 803.33 GFLOPS | Progress: (704/2000) | 608.08 s Done.
# [Task 3/12] Current/Best: 103.69/1141.25 GFLOPS | Progress: (768/2000) | 702.13 s Done.
# [Task 4/12] Current/Best: 2905.03/3925.15 GFLOPS | Progress: (864/2000) | 745.94 sterminate called without an active exception
# [Task 4/12] Current/Best: 2789.36/3925.15 GFLOPS | Progress: (1056/2000) | 929.40 s Done.
# [Task 5/12] Current/Best: 89.06/1076.24 GFLOPS | Progress: (704/2000) | 601.73 s Done.
# [Task 6/12] Current/Best: 40.39/2129.02 GFLOPS | Progress: (1088/2000) | 1125.76 s Done.
# [Task 7/12] Current/Best: 4090.53/5007.02 GFLOPS | Progress: (800/2000) | 903.90 s Done.
# [Task 8/12] Current/Best: 4.78/1272.28 GFLOPS | Progress: (768/2000) | 749.14 s Done.
# [Task 9/12] Current/Best: 1391.45/2325.08 GFLOPS | Progress: (992/2000) | 1084.87 s Done.
# [Task 10/12] Current/Best: 1995.44/2383.59 GFLOPS | Progress: (864/2000) | 862.60 s Done.
# [Task 11/12] Current/Best: 4093.94/4899.80 GFLOPS | Progress: (224/2000) | 240.92 sterminate called without an active exception
# [Task 11/12] Current/Best: 3487.98/4909.91 GFLOPS | Progress: (480/2000) | 534.96 sterminate called without an active exception
# [Task 11/12] Current/Best: 4636.84/4912.17 GFLOPS | Progress: (1184/2000) | 1381.16 sterminate called without an active exception
# [Task 11/12] Current/Best: 50.12/4912.17 GFLOPS | Progress: (1344/2000) | 1602.81 s Done.
# [Task 12/12] Current/Best: 3581.31/4286.30 GFLOPS | Progress: (736/2000) | 943.52 s Done.
# Compile...
# Evaluate inference time cost...
# Mean inference time (std dev): 1.07 ms (0.05 ms)
#
# As a reference baseline, the time cost of MXNet + TensorRT on resnet-18 is 1.30ms. So we are a little faster.
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.ai
#################################################################
# Scale up measurement by using multiple devices
# ----------------------------------------------
#
# If you have multiple devices, you can use all of them for measurement.
# TVM uses the RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized master node. We can register all devices to
# the tracker. For example, if we have 10 GPU cards, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#
# Then open another new terminal for the RPC server. We need to start one server
# for each dedicated device. We use a string key to distinguish the types of devices.
# You can pick a name you like.
# (Note: For the rocm backend, there are some internal errors with the compiler,
# so we need to add `--no-fork` to the argument list.)
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=localhost:9190 --key=1080ti
#
# After registering devices, we can confirm this by querying the rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=localhost --port=9190
#
# For example, if we have four 1080ti, two titanx and one gfx900, the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# 1080ti 4 4 0
# titanx 2 2 0
# gfx900 1 1 0
# ----------------------------------
#
# Finally, we need to change the tuning option to use RPCRunner. Use the code below
# to replace the corresponding part above.
tuning_option = {
'log_filename': log_file,
'tuner': 'xgb',
'n_trial': 2000,
'early_stopping': 600,
'measure_option': autotvm.measure_option(
builder=autotvm.LocalBuilder(timeout=10),
runner=autotvm.RPCRunner(
'1080ti', # change the device key to your key
'localhost', 9190,
number=20, repeat=3, timeout=4, min_repeat_ms=150),
),
}
|
|
from direct.directnotify import DirectNotifyGlobal
from toontown.cogdominium.DistCogdoGameAI import DistCogdoGameAI
import CogdoMazeGameGlobals
from direct.distributed.ClockDelta import *
from direct.task import Timer
from toontown.battle import BattleBase
from toontown.building.ElevatorConstants import *
ALL_ABOARD_LAG = 3.5
BASE_TOON_UP = 10
JOKE_TOON_UP = 5
class DistCogdoMazeGameAI(DistCogdoGameAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistCogdoMazeGameAI")
delayIntro = BattleBase.ELEVATOR_T + ElevatorData[ELEVATOR_NORMAL]['openTime']
def __init__(self, air):
DistCogdoGameAI.__init__(self, air)
self.numSuits = (0,0,0)
self.timer = Timer.Timer()
self.doorRevealed = False
self.toonsInDoor = []
self.bosses = {}
self.fastMinions = {}
self.slowMinions = {}
self.suitTypes = [self.bosses, self.fastMinions, self.slowMinions]
self.numJokes = {}
def announceGenerate(self):
DistCogdoGameAI.announceGenerate(self)
self.setupSuitsAI()
def setupSuitsAI(self):
bossHp = CogdoMazeGameGlobals.SuitData[0]['hp']
fastMiniHp = CogdoMazeGameGlobals.SuitData[1]['hp']
slowMiniHp = CogdoMazeGameGlobals.SuitData[2]['hp']
serialNum = 0
for i in range(self.numSuits[0]):
self.bosses[serialNum] = bossHp
serialNum += 1
for i in range(self.numSuits[1]):
self.fastMinions[serialNum] = fastMiniHp
serialNum += 1
for i in range(self.numSuits[2]):
self.slowMinions[serialNum] = slowMiniHp
serialNum += 1
def setNumSuits(self, num):
self.numSuits = num
def getNumSuits(self):
return self.numSuits
def requestUseGag(self, x, y, h, timestamp):
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('toonUsedGag', [avId, x, y, h, globalClockDelta.getRealNetworkTime()])
def requestSuitHitByGag(self, suitType, suitNum):
hitAI = self.hitSuitAI(suitType, suitNum)
if not hitAI:
self.notify.warning('Cannot hit suit!')
return
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('suitHitByGag', [avId, suitType, suitNum])
def requestHitBySuit(self, suitType, suitNum, nettime):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
lostHp = CogdoMazeGameGlobals.SuitData[suitType]['toonDamage'] * self.getDifficulty() * 10
av.takeDamage(lostHp)
networkTime = globalClockDelta.getRealNetworkTime()
self.sendUpdate('toonHitBySuit', [avId, suitType, suitNum, networkTime])
if av.getHp() < 1:
self.toonWentSad(avId)
def requestHitByDrop(self):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
lostHp = CogdoMazeGameGlobals.DropDamage
av.takeDamage(lostHp)
self.sendUpdate('toonHitByDrop', [avId])
def requestPickUp(self, pickupNum):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
now = globalClockDelta.getRealNetworkTime()
if avId in self.numJokes:
self.numJokes[avId] += 1
else:
self.numJokes[avId] = 1
self.sendUpdate('pickUp', [avId, pickupNum, now])
def requestGag(self, coolerIndex):
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('hasGag', [avId, globalClockDelta.getRealNetworkTime()])
def hitSuitAI(self, suitType, suitNum):
cogKey = None
for cogNum in self.suitTypes[suitType].keys():
if cogNum == suitNum:
cogKey = cogNum
break
        if cogKey is None:
return 0
cogHp = self.suitTypes[suitType][cogKey]
cogHp -= 1
self.suitTypes[suitType][cogKey] = cogHp
if cogHp <= 0:
del self.suitTypes[suitType][cogKey]
return 1
def handleStart(self):
taskMgr.add(self.__checkGameDone, self.taskName('check-game-done'))
taskMgr.add(self.__checkPlayersTask, self.taskName('check-players-task'))
serverDelay = 1.0
self.timer.startCallback(CogdoMazeGameGlobals.SecondsUntilTimeout + serverDelay, self.__handleGameOver)
taskMgr.doMethodLater(serverDelay, self.clientCountdown, self.taskName('client_countdown'))
taskMgr.add(self.__timeWarningTask, self.taskName('time-warning-task'))
def clientCountdown(self, task):
self.doAction(CogdoMazeGameGlobals.GameActions.Countdown, 0)
return task.done
def __handleGameOver(self):
for toon in self.toons:
            if toon not in self.toonsInDoor:
self.killToon(toon)
self.removeAll()
self.gameDone(failed=True)
def __checkGameDone(self, task):
bossesLeft = self.bosses
if len(bossesLeft) == 0:
self.timer.stop()
self.doAction(CogdoMazeGameGlobals.GameActions.OpenDoor, 0)
self.__startTimeout()
return task.done
return task.again
def __startTimeout(self):
self.timer.startCallback(CogdoMazeGameGlobals.SecondsUntilGameEnds, self.__handleTimeout)
def __handleTimeout(self):
for toon in self.toons:
            if toon not in self.toonsInDoor:
self.killToon(toon)
self.removeAll()
self.gameDone()
def __timeWarningTask(self, task):
if self.timer.getT() <= CogdoMazeGameGlobals.SecondsForTimeAlert:
self.doAction(CogdoMazeGameGlobals.GameActions.TimeAlert, 0)
return task.done
return task.again
def killToon(self, avId):
av = self.air.doId2do.get(avId)
if av:
if av.getHp() > 0:
av.takeDamage(av.getHp())
self.toonWentSad(avId)
self.__playerDisconnected(avId)
def __checkPlayersTask(self, task):
for toonId in self.toons:
toon = self.air.doId2do.get(toonId)
if not toon:
self.__playerDisconnected(toonId)
return task.again
def __playerDisconnected(self, avId):
self.sendUpdate('setToonDisconnect', [avId])
self.toons.remove(avId)
if len(self.toons) == 0:
self.removeAll()
self.gameDone(failed=True)
def doAction(self, action, data):
self.sendUpdate('doAction', [action, data, globalClockDelta.getRealNetworkTime()])
def requestAction(self, action, data):
Globals = CogdoMazeGameGlobals
avId = self.air.getAvatarIdFromSender()
if action == Globals.GameActions.RevealDoor:
if not self.doorRevealed:
self.doAction(action, avId)
self.doorRevealed = True
else:
self.notify.warning('Toon tried to reveal the door but it\'s already revealed! Ignoring.')
elif action == Globals.GameActions.EnterDoor:
if avId not in self.toonsInDoor:
self.doAction(action, avId)
self.toonsInDoor.append(avId)
self.toonUpToon(avId)
else:
self.notify.warning('Toon tried to enter the door but has already entered! Ignoring.')
return
if len(self.toonsInDoor) >= len(self.toons):
self.__handleAllAboard()
else:
self.notify.warning('Client requested unknown action \'%s\'' % action)
def __handleAllAboard(self):
if len(self.toonsInDoor) != len(self.toons):
self.notify.warning('__handleAllAboard expects all toons to be aboard!')
return
self.removeAll()
taskMgr.doMethodLater(ALL_ABOARD_LAG, lambda t: self.gameDone(), self.taskName('all-aboard-delay'))
def toonUpToon(self, toonId):
if toonId in self.toonsInDoor:
toon = self.air.doId2do.get(toonId)
if toon:
val = min(BASE_TOON_UP + JOKE_TOON_UP * self.numJokes.get(toonId, 0), toon.getMaxHp())
toon.toonUp(val)
def removeAll(self):
taskMgr.remove(self.taskName('check-game-done'))
taskMgr.remove(self.taskName('check-players-task'))
taskMgr.remove(self.taskName('time-warning-task'))
taskMgr.remove(self.taskName('all-aboard-delay'))
self.timer.stop()
def disable(self):
DistCogdoGameAI.disable(self)
self.removeAll()
from otp.ai.MagicWordGlobal import *
@magicWord(category=CATEGORY_OVERRIDE)
def endMaze():
if hasattr(simbase.air, 'cogdoGame'):
maze = simbase.air.cogdoGame
maze.doAction(CogdoMazeGameGlobals.GameActions.OpenDoor, 0)
return 'Completed Maze Game'
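# Illustrative usage (the "~" prefix is an assumption about the magic-word
# syntax): typing "~endMaze" in-game calls endMaze() and opens the exit door.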
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_dosta_abcdjm_dcl
@file marine-integrations/mi/dataset/parser/test/test_dosta_abcdjm_dcl.py
@author Steve Myerson
@brief Test code for a Dosta_abcdjm_dcl data parser
In the following files, Metadata consists of 4 records
and Garbled consists of 3 records.
There is 1 group of Sensor Data records for each set of metadata.
Files used for testing:
20000101.dosta0.log
Metadata - 1 set, Sensor Data - 0 records, Garbled - 0, Newline - \n
20010121.dosta1.log
Metadata - 1 set, Sensor Data - 21 records, Garbled - 0, Newline - \n
20020222.dosta2.log
Metadata - 2 sets, Sensor Data - 22 records, Garbled - 0, Newline - \r\n
20030314.dosta3.log
Metadata - 3 sets, Sensor Data - 14 records, Garbled - 0, Newline - \n
20041225.dosta4.log
Metadata - 2 sets, Sensor Data - 250 records, Garbled - 0, Newline - \n
20050103.dosta5.log
Metadata - 1 set, Sensor Data - 3 records, Garbled - 1, Newline - \n
20060207.dosta6.log
Metadata - 2 sets, Sensor Data - 7 records, Garbled - 2, Newline \r\n
20070114.dosta7.log
This file contains 15 invalid sensor data records. Newline - \r\n
1. invalid year
2. invalid month
3. invalid day
4. invalid hour
5. invalid minute
6. invalid second
7. invalid product
8. spaces instead of tabs
9. a 2-digit serial number
10. floating point number missing the decimal point
11. serial number missing
12. one of the floating point numbers missing
13. Date in form YYYY-MM-DD
14. time field missing milliseconds
15. extra floating point number in sensor data
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import UnexpectedDataException
from mi.core.log import get_logger
from mi.dataset.driver.dosta_abcdjm.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.dosta_abcdjm_dcl import \
DostaAbcdjmDclRecoveredParser, \
DostaAbcdjmDclTelemeteredParser
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
FILE0 = '20000101.dosta0.log'
FILE1 = '20010121.dosta1.log'
FILE3 = '20030314.dosta3.log'
FILE4 = '20041225.dosta4.log'
FILE5 = '20050103.dosta5.log'
FILE6 = '20060207.dosta6.log'
FILE7 = '20070114.dosta7.log'
@attr('UNIT', group='mi')
class DostaAbcdjmDclParserUnitTestCase(ParserUnitTestCase):
def create_rec_parser(self, file_handle):
"""
This function creates a DostaAbcdjmDcl parser for recovered data.
"""
parser = DostaAbcdjmDclRecoveredParser(
file_handle, self.exception_callback)
return parser
def create_tel_parser(self, file_handle):
"""
This function creates a DostaAbcdjmDcl parser for telemetered data.
"""
parser = DostaAbcdjmDclTelemeteredParser(
file_handle, self.exception_callback)
return parser
def open_file(self, filename):
in_file = open(os.path.join(RESOURCE_PATH, filename), mode='r')
return in_file
def create_yml(self, particles, filename):
particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))
def test_big_giant_input(self):
"""
Read a large file and verify that all expected particles can be read.
Verification is not done at this time, but will be done during
integration and qualification testing.
File used for this test has 500 total particles.
"""
log.debug('===== START TEST BIG GIANT INPUT RECOVERED =====')
in_file = self.open_file(FILE4)
parser = self.create_rec_parser(in_file)
number_expected_results = 500
# In a single read, get all particles in this file.
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertEqual(self.exception_callback_value, [])
log.debug('===== START TEST BIG GIANT INPUT TELEMETERED =====')
in_file = self.open_file(FILE4)
parser = self.create_tel_parser(in_file)
# In a single read, get all particles in this file.
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertEqual(self.exception_callback_value, [])
log.debug('===== END TEST BIG GIANT INPUT =====')
def test_get_many(self):
"""
Read a file and pull out multiple data particles at one time.
Verify that the results are those we expected.
"""
log.debug('===== START TEST GET MANY RECOVERED =====')
expected_particle = 40
in_file = self.open_file(FILE3)
parser = self.create_rec_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(expected_particle)
self.assertEqual(len(result), expected_particle)
self.assertEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== START TEST GET MANY TELEMETERED =====')
in_file = self.open_file(FILE3)
parser = self.create_tel_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(expected_particle)
self.assertEqual(len(result), expected_particle)
self.assertEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== END TEST GET MANY =====')
def test_invalid_metadata_records(self):
"""
Read data from a file containing invalid metadata records as well
as valid metadata records and sensor data records.
Verify that the sensor data records can be read correctly
and that invalid metadata records are detected.
File 5 has 3 invalid metadata records.
File 6 has 6 invalid metadata records.
"""
log.debug('===== START TEST INVALID METADATA RECOVERED =====')
expected_particle = 1
in_file = self.open_file(FILE5)
parser = self.create_rec_parser(in_file)
# Get record and verify.
result = parser.get_records(expected_particle)
self.assertEqual(len(result), expected_particle)
self.assertEqual(len(self.exception_callback_value), 3)
in_file.close()
self.exception_callback_value = [] # reset exceptions
log.debug('===== START TEST INVALID METADATA TELEMETERED =====')
in_file = self.open_file(FILE6)
parser = self.create_tel_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(expected_particle)
self.assertEqual(len(result), expected_particle)
self.assertEqual(len(self.exception_callback_value), 6)
in_file.close()
log.debug('===== END TEST INVALID METADATA =====')
def test_invalid_sensor_data_records(self):
"""
Read data from a file containing invalid sensor data records.
Verify that no instrument particles are produced
and the correct number of exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA RECOVERED =====')
in_file = self.open_file(FILE7)
parser = self.create_rec_parser(in_file)
expected_exceptions = 15
# Try to get records and verify that none are returned.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(len(self.exception_callback_value), expected_exceptions)
in_file.close()
self.exception_callback_value = [] # reset exceptions
log.debug('===== START TEST INVALID SENSOR DATA TELEMETERED =====')
in_file = self.open_file(FILE7)
parser = self.create_tel_parser(in_file)
# Try to get records and verify that none are returned.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(len(self.exception_callback_value), expected_exceptions)
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_no_sensor_data(self):
"""
Read a file containing no sensor data records
and verify that no particles are produced.
"""
log.debug('===== START TEST NO SENSOR DATA RECOVERED =====')
in_file = self.open_file(FILE0)
parser = self.create_rec_parser(in_file)
# Try to get a record and verify that none are produced.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== START TEST NO SENSOR DATA TELEMETERED =====')
in_file = self.open_file(FILE0)
parser = self.create_tel_parser(in_file)
# Try to get a record and verify that none are produced.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== END TEST NO SENSOR DATA =====')
def test_many_with_yml(self):
"""
Read a file and verify that all records can be read.
Verify that the contents of the particles are correct.
There should be no exceptions generated.
"""
log.debug('===== START TEST MANY WITH YML RECOVERED =====')
num_particles = 21
in_file = self.open_file(FILE1)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(num_particles)
log.debug("Num particles: %d", len(particles))
self.assert_particles(particles, "rec_20010121.dosta1.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== START TEST MANY WITH YML TELEMETERED =====')
in_file = self.open_file(FILE1)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(num_particles)
log.debug("Num particles: %d", len(particles))
self.assert_particles(particles, "tel_20010121.dosta1.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
in_file.close()
log.debug('===== END TEST MANY WITH YML =====')
def test_Bug_4433(self):
"""
Read a file and verify that all records can be read.
Any exceptions generated must be UnexpectedDataException
(regression test for Bug 4433).
"""
num_particles = 10000
in_file = self.open_file('20150330.dosta1.log')
parser = self.create_rec_parser(in_file)
particles = parser.get_records(num_particles)
log.debug("Num particles: %d", len(particles))
# make sure we only get UnexpectedDataException
for exception in self.exception_callback_value:
self.assertIsInstance(exception, UnexpectedDataException)
in_file.close()
|
|
from __future__ import unicode_literals
import pytz
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from framework.auth.core import Auth
from tests.base import ApiTestCase, capture_signals
from tests.factories import (
ProjectFactory,
UserFactory,
AuthUserFactory,
CommentFactory
)
from website.addons.osfstorage import settings as osfstorage_settings
from website.project.signals import contributor_removed
from website.project.model import NodeLog
# stolen from^W^Winspired by DRF rest_framework.fields.DateTimeField.to_representation
def _dt_to_iso8601(value):
iso8601 = value.isoformat()
if iso8601.endswith('+00:00'):
iso8601 = iso8601[:-9] + 'Z'  # strip '+00:00' plus the last 3 microsecond digits, keeping millisecond precision
return iso8601
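# Illustrative example (assumes a UTC datetime with microsecond precision):
#   datetime(2015, 3, 30, 12, 0, 0, 123456, tzinfo=pytz.utc).isoformat()
#   -> '2015-03-30T12:00:00.123456+00:00'
#   _dt_to_iso8601(...) -> '2015-03-30T12:00:00.123Z'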
class TestFileView(ApiTestCase):
def setUp(self):
super(TestFileView, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user, comment_level='public')
self.file = api_utils.create_test_file(self.node, self.user, create_guid=False)
self.file_url = '/{}files/{}/'.format(API_BASE, self.file._id)
def test_must_have_auth(self):
res = self.app.get(self.file_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_must_be_contributor(self):
user = AuthUserFactory()
res = self.app.get(self.file_url, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unvisited_file_has_no_guid(self):
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['guid'], None)
def test_visited_file_has_guid(self):
guid = self.file.get_guid(create=True)
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_is_not_none(guid)
assert_equal(res.json['data']['attributes']['guid'], guid._id)
def test_get_file(self):
res = self.app.get(self.file_url, auth=self.user.auth)
self.file.versions[-1]._clear_caches()
self.file.versions[-1].reload()
assert_equal(res.status_code, 200)
assert_equal(res.json.keys(), ['data'])
attributes = res.json['data']['attributes']
assert_equal(attributes['path'], self.file.path)
assert_equal(attributes['kind'], self.file.kind)
assert_equal(attributes['name'], self.file.name)
assert_equal(attributes['materialized_path'], self.file.materialized_path)
assert_equal(attributes['last_touched'], None)
assert_equal(attributes['provider'], self.file.provider)
assert_equal(attributes['size'], self.file.versions[-1].size)
assert_equal(attributes['date_modified'], _dt_to_iso8601(self.file.versions[-1].date_created.replace(tzinfo=pytz.utc)))
assert_equal(attributes['date_created'], _dt_to_iso8601(self.file.versions[0].date_created.replace(tzinfo=pytz.utc)))
assert_equal(attributes['extra']['hashes']['md5'], None)
assert_equal(attributes['extra']['hashes']['sha256'], None)
assert_equal(attributes['tags'], [])
def test_file_has_rel_link_to_owning_project(self):
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('node', res.json['data']['relationships'].keys())
expected_url = self.node.api_v2_url
actual_url = res.json['data']['relationships']['node']['links']['related']['href']
assert_in(expected_url, actual_url)
def test_file_has_comments_link(self):
guid = self.file.get_guid(create=True)
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('comments', res.json['data']['relationships'].keys())
expected_url = '/{}nodes/{}/comments/?filter[target]={}'.format(API_BASE, self.node._id, guid._id)
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert_in(expected_url, url)
def test_file_has_correct_unread_comments_count(self):
contributor = AuthUserFactory()
self.node.add_contributor(contributor, auth=Auth(self.user), save=True)
comment = CommentFactory(node=self.node, target=self.file.get_guid(create=True), user=contributor, page='files')
res = self.app.get('/{}files/{}/?related_counts=True'.format(API_BASE, self.file._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
assert_equal(unread_comments, 1)
def test_only_project_contrib_can_comment_on_closed_project(self):
self.node.comment_level = 'private'
self.node.is_public = True
self.node.save()
res = self.app.get(self.file_url, auth=self.user.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
non_contributor = AuthUserFactory()
res = self.app.get(self.file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_any_loggedin_user_can_comment_on_open_project(self):
self.node.is_public = True
self.node.save()
non_contributor = AuthUserFactory()
res = self.app.get(self.file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
def test_non_logged_in_user_cant_comment(self):
self.node.is_public = True
self.node.save()
res = self.app.get(self.file_url)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_checkout(self):
assert_equal(self.file.checkout, None)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth
)
self.file.reload()
self.file.save()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, self.user)
res = self.app.get(
self.file_url,
auth=self.user.auth
)
assert_equal(len(self.node.logs), 2)
assert_equal(self.node.logs[-1].action, NodeLog.CHECKED_OUT)
assert_equal(self.node.logs[-1].user, self.user)
assert_equal(
self.user._id,
res.json['data']['relationships']['checkout']['links']['related']['meta']['id']
)
assert_in(
'/{}users/{}/'.format(API_BASE, self.user._id),
res.json['data']['relationships']['checkout']['links']['related']['href']
)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth
)
self.file.reload()
assert_equal(self.file.checkout, None)
assert_equal(res.status_code, 200)
def test_checkout_file_no_type(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_checkout_file_no_id(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_checkout_file_incorrect_type(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'Wrong type.', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 409)
def test_checkout_file_incorrect_id(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': '12345', 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 409)
def test_checkout_file_no_attributes(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files'}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_must_set_self(self):
user = UserFactory()
assert_equal(self.file.checkout, None)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
assert_equal(res.status_code, 400)
assert_equal(self.file.checkout, None)
def test_must_be_self(self):
user = AuthUserFactory()
self.file.checkout = self.user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': user._id}}},
auth=user.auth,
expect_errors=True,
)
self.file.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, self.user)
def test_admin_can_checkin(self):
user = UserFactory()
self.node.add_contributor(user)
self.file.checkout = user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, None)
assert_equal(self.node.logs[-1].action, NodeLog.CHECKED_IN)
assert_equal(self.node.logs[-1].user, self.user)
def test_admin_can_checkout(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, self.user)
assert_equal(self.node.logs[-1].action, NodeLog.CHECKED_OUT)
assert_equal(self.node.logs[-1].user, self.user)
def test_cannot_checkin_when_already_checked_in(self):
count = len(self.node.logs)
assert_false(self.file.is_checked_out)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(len(self.node.logs), count)
assert_equal(self.file.checkout, None)
def test_cannot_checkout_when_checked_out(self):
user = UserFactory()
self.node.add_contributor(user)
self.file.checkout = user
self.file.save()
count = len(self.node.logs)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, user)
assert_equal(len(self.node.logs), count)
def test_noncontrib_cannot_checkout(self):
user = AuthUserFactory()
assert_equal(self.file.checkout, None)
assert user._id not in self.node.permissions.keys()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, None)
assert self.node.logs[-1].action != NodeLog.CHECKED_OUT
def test_read_contrib_cannot_checkout(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read'])
self.node.save()
assert_false(self.node.can_edit(user=user))
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=user.auth,
expect_errors=True
)
self.file.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, None)
assert self.node.logs[-1].action != NodeLog.CHECKED_OUT
def test_user_can_checkin(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read', 'write'])
self.node.save()
assert_true(self.node.can_edit(user=user))
self.file.checkout = user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=user.auth,
)
self.file.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, None)
def test_removed_contrib_files_checked_in(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read', 'write'])
self.node.save()
assert_true(self.node.can_edit(user=user))
self.file.checkout = user
self.file.save()
assert_true(self.file.is_checked_out)
with capture_signals() as mock_signals:
self.node.remove_contributor(user, auth=Auth(user))
assert_equal(mock_signals.signals_sent(), set([contributor_removed]))
self.file.reload()
assert_false(self.file.is_checked_out)
def test_must_be_osfstorage(self):
self.file.provider = 'github'
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, 403)
def test_get_file_resolves_guids(self):
guid = self.file.get_guid(create=True)
url = '/{}files/{}/'.format(API_BASE, guid._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json.keys(), ['data'])
assert_equal(res.json['data']['attributes']['path'], self.file.path)
def test_get_file_invalid_guid_gives_404(self):
url = '/{}files/{}/'.format(API_BASE, 'asdasasd')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_file_non_file_guid_gives_404(self):
url = '/{}files/{}/'.format(API_BASE, self.node._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
class TestFileVersionView(ApiTestCase):
def setUp(self):
super(TestFileVersionView, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.osfstorage = self.node.get_addon('osfstorage')
self.root_node = self.osfstorage.get_root()
self.file = self.root_node.append_file('test_file')
self.file.create_version(self.user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
def test_listing(self):
self.file.create_version(self.user, {
'object': '0683m38e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1347,
'contentType': 'img/png'
}).save()
res = self.app.get(
'/{}files/{}/versions/'.format(API_BASE, self.file._id),
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], '1')
assert_equal(res.json['data'][1]['id'], '2')
def test_by_id(self):
res = self.app.get(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], '1')
def test_read_only(self):
assert_equal(self.app.put(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
assert_equal(self.app.post(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
assert_equal(self.app.delete(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
class TestFileTagging(ApiTestCase):
def setUp(self):
super(TestFileTagging, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.file1 = api_utils.create_test_file(
self.node, self.user, filename='file1')
self.payload = {
"data": {
"type": "files",
"id": self.file1._id,
"attributes": {
"checkout": None,
"tags": ["goofy"]
}
}
}
self.url = '/{}files/{}/'.format(API_BASE, self.file1._id)
def test_tags_add_properly(self):
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Ensure adding tag data is correct from the PUT response
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'goofy')
def test_tags_update_properly(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
# Ensure removing and adding tag data is correct from the PUT response
self.payload['data']['attributes']['tags'] = ['goofier']
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'goofier')
def test_tags_add_and_remove_properly(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
self.payload['data']['attributes']['tags'] = []
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']['attributes']['tags']), 0)
def test_put_wo_tags_doesnt_remove_tags(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
self.payload['data']['attributes'] = {'checkout': None}
res = self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Ensure adding tag data is correct from the PUT response
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'goofy')
def test_add_tag_adds_log(self):
count = len(self.node.logs)
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(len(self.node.logs), count + 1)
assert_equal(NodeLog.FILE_TAG_ADDED, self.node.logs[-1].action)
def test_remove_tag_adds_log(self):
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
self.payload['data']['attributes']['tags'] = []
count = len(self.node.logs)
self.app.put_json_api(self.url, self.payload, auth=self.user.auth)
assert_equal(len(self.node.logs), count + 1)
assert_equal(NodeLog.FILE_TAG_REMOVED, self.node.logs[-1].action)
|
|
from oslo_config import cfg
from oslo_log import log as logging
from nca47.common import service
from oslo_utils import timeutils
from nca47.common.i18n import _
from nca47.common.i18n import _LI
from nca47 import agent
from nca47 import objects
from nca47.agent.agentFlag.agent_rpcapi import AgentAPI
from oslo_service import loopingcall
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
RPC_API_VERSION = '1.0'
count = 0
AGENT_OPTS = [
cfg.IntOpt('report_interval',
default=60,
help=_('Seconds between nodes reporting state to server; '
'should be less than agent_down_time, best if it '
'is half or less than agent_down_time.')),
cfg.IntOpt('agent_down_time',
default=120,
help=_('Seconds to regard the agent is down; should be at '
'least twice report_interval, to be sure the '
'agent is down for good.')),
]
opt_group = cfg.OptGroup(name='agent',
title='Options for nca47 agent node info')
CONF.register_group(opt_group)
CONF.register_opts(AGENT_OPTS, opt_group)
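# Illustrative configuration snippet for the options above (the file name and
# placement are assumptions; only the option names and defaults come from
# this module):
#   [agent]
#   report_interval = 60
#   agent_down_time = 120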
class DNSService(service.RPCService, service.Service):
""" Use for handling DNS requests and validation request parameters"""
RPC_API_VERSION = '1.0'
# Since the RPC service class handles and replies to all messages
# for every RPC client, it initializes some keys here
def __init__(self, topic='dns_manager', agentinfo=None, threads=None):
self.rpc_topic = topic
super(DNSService, self).__init__(threads=threads)
self.agent = agent.get_dns_backend()
# self.agent_rpcapi = AgentAPI.get_instance()
# periodic = loopingcall.FixedIntervalLoopingCall(self.get_agent_status,
# agentinfo)
# periodic.start(interval=CONF.agent.report_interval)
def get_agent_status(self, agentInfo):
try:
self.agent_rpcapi.report_agent_state(agentInfo)
except Exception as e:
raise e
@property
def service_name(self):
return self.rpc_topic
def start(self):
super(DNSService, self).start()
def stop(self):
super(DNSService, self).stop()
# Zone Methods
def create_zone(self, context, zone):
LOG.info(_LI("create_zone: Replying rpc client's create_zone."))
zone = self.agent.create_zone(context, zone)
return zone
def update_zone(self, context, zone, zone_id):
LOG.info(_LI("update_zone: Replying rpc client's update_zone."))
zone = self.agent.update_zone(context, zone, zone_id)
return zone
def update_zone_owners(self, context, zone, zone_id):
LOG.info(_LI("update_zone_owners: Replying rpcclient's update_zone."))
zone = self.agent.update_zone_owners(context, zone, zone_id)
return zone
def delete_zone(self, context, zone_id):
LOG.info(_LI("delete_zone: Replying rpc client's delete_zone."))
response = self.agent.delete_zone(context, zone_id)
return response
def get_zone_one(self, context, zone_id):
LOG.info(_LI("get_zone_one: Replying rpc client's "
"get_zone_one."))
response = self.agent.get_zone_one(context, zone_id)
return response
def get_zones(self, context):
LOG.info(_LI("get_zones: Replying rpc client's get_zones."))
response = self.agent.get_zones(context)
return response
# Zone_records Methods
def create_record(self, context, records_dic, zone_id):
LOG.info(_LI("create_record: Calling central's create_zone_record."))
response = self.agent.create_rrs(context, records_dic, zone_id)
return response
def get_records(self, context, zone_id):
LOG.info(_LI("get_records: Calling central's get_zone_record."))
response = self.agent.get_rrs(context, zone_id)
return response
def update_record(self, context, records_dic, zone_id, record_id):
LOG.info(_LI("update_record: Calling central's update_zone_record."))
response = self.agent.update_rrs(context, records_dic, zone_id,
record_id)
return response
def delete_record(self, context, zone_id, record_id):
LOG.info(_LI("delete_record: Calling central's delete_zone_record."))
response = self.agent.delete_rrs(context, zone_id, record_id)
return response
def del_cache(self, context, cache_dic):
LOG.info(_LI("del_cache: Calling central's del_cache."))
response = self.agent.del_cache(context, cache_dic)
return response
def glsb_math(self, context, obj_dic, math):
LOG.info(_LI("glsb_math: Replying rpc client's glsb_math."))
func = getattr(self.agent, math)
response = func(context, obj_dic)
return response
class FWService(service.RPCService, service.Service):
"""
Used for handling firewall requests and validating
request parameters
"""
RPC_API_VERSION = '1.0'
def __init__(self, topic='firewall_manager', agentinfo=None,
threads=None):
if agentinfo:
self.rpc_topic = '%s.%s' % (topic, agentinfo['agent_ip'])
else:
self.rpc_topic = topic
super(FWService, self).__init__(threads=threads)
self.agent = agent.get_firewall_backend()
@property
def service_name(self):
return self.rpc_topic
def start(self):
super(FWService, self).start()
def stop(self):
super(FWService, self).stop()
# this is a vlan operation
def create_vlan(self, context, vlan_infos):
LOG.info(_LI("create_vlan: Calling central's create_vlan."))
response = self.agent.create_vlan(context, vlan_infos)
return response
def del_vlan(self, context, id_, vlan_infos):
LOG.info(_LI("del_vlan: Calling central's del_vlan."))
response = self.agent.del_vlan(context, id_, vlan_infos)
return response
def get_vlan(self, context, vlan_infos):
LOG.info(_LI("get_vlan: Calling central's get_vlan."))
response = self.agent.get_vlan(context, vlan_infos)
return response
def get_vlans(self, context, vlan_infos):
LOG.info(_LI("get_vlans: Calling central's get_vlans."))
response = self.agent.get_vlans(context, vlan_infos)
return response
# this is a netservice operation
def create_netservice(self, context, netsev_infos):
LOG.info(_LI("create_netservice: Calling central's"
"create_netservice."))
response = self.agent.create_netservice(context, netsev_infos)
return response
def del_netservice(self, context, id_, netsev_infos):
LOG.info(_LI("del_netservice: Calling central's del_netservice."))
response = self.agent.del_netservice(context, id_, netsev_infos)
return response
def get_netservice(self, context, netsev_infos):
LOG.info(_LI("get_netservice: Calling central's get_netservice."))
response = self.agent.get_netservice(context, netsev_infos)
return response
def get_netservices(self, context, netsev_infos):
LOG.info(_LI("get_netservices: Calling central's get_netservices."))
response = self.agent.get_netservices(context, netsev_infos)
return response
# this is a addrobj operation
def add_addrobj(self, context, addrobj_infos):
LOG.info(_LI("add_addrobj: Calling central's add_addrobj."))
response = self.agent.add_addrobj(context, addrobj_infos)
return response
def del_addrobj(self, context, addrobj_infos):
LOG.info(_LI("del_addrobj: Calling central's del_addrobj."))
response = self.agent.del_addrobj(
context, addrobj_infos['id'], addrobj_infos)
return response
def get_addrobj(self, context, addrobj_infos):
LOG.info(_LI("get_addrobj: Calling central's get_addrobj."))
response = self.agent.get_addrobj(context, addrobj_infos)
return response
def get_addrobjs(self, context, addrobj_infos):
LOG.info(_LI("get_addrobjs: Calling central's get_addrobjs."))
response = self.agent.get_addrobjs(context, addrobj_infos)
return response
# this is a snataddrpool operation
def add_snataddrpool(self, context, snataddrpool_infos):
LOG.info(_LI("add_snataddrpool: Calling central's add_snataddrpool."))
response = self.agent.add_snataddrpool(context, snataddrpool_infos)
return response
def del_snataddrpool(self, context, snataddrpool_infos):
LOG.info(_LI("del_snataddrpool: Calling central's del_snataddrpool."))
response = self.agent.del_snataddrpool(
context, snataddrpool_infos['id'], snataddrpool_infos)
return response
def get_snataddrpool(self, context, snataddrpool_infos):
LOG.info(_LI("get_snataddrpool: Calling central's get_snataddrpool."))
response = self.agent.get_snataddrpool(context, snataddrpool_infos)
return response
def get_snataddrpools(self, context, snataddrpool_infos):
LOG.info(_LI("get_snataddrpools: Calling central's"
"get_snataddrpools."))
response = self.agent.get_snataddrpools(context, snataddrpool_infos)
return response
def create_vfw(self, context, vfw):
LOG.info(_LI("create_vfw: Calling central's create_vfw."))
response = self.agent.create_vfw(context, vfw)
return response
def delete_vfw(self, context, vfw):
LOG.info(_LI("delete_vfw: Calling central's delete_vfw."))
response = self.agent.delete_vfw(context, vfw)
return response
def get_vfw(self, context, vfw):
LOG.info(_LI("get_vfw: Calling central's get_vfw."))
response = self.agent.get_vfw(context, vfw)
return response
def get_all_vfws(self, context, vfw):
LOG.info(_LI("get_all_vfws: Calling central's get_all_vfws."))
response = self.agent.get_all_vfws(context, vfw)
return response
def create_dnat(self, context, dnat):
LOG.info(_LI("create_dnat: Calling central's create_dnat."))
response = self.agent.create_dnat(context, dnat)
return response
def delete_dnat(self, context, dnat):
LOG.info(_LI("delete_dnat: Calling central's delete_dnat."))
response = self.agent.delete_dnat(context, dnat)
return response
def get_dnat(self, context, dnat):
LOG.info(_LI("get_dnat: Calling central's get_dnat."))
response = self.agent.get_dnat(context, dnat)
return response
def get_all_dnats(self, context, dnat):
LOG.info(_LI("get_all_dnats: Calling central's get_all_dnats."))
response = self.agent.get_all_dnats(context, dnat)
return response
def create_packetfilter(self, context, packetfilter):
LOG.info(_LI("create_packetfilter: Calling central's"
"create_packetfilter."))
response = self.agent.create_packetfilter(context, packetfilter)
return response
def delete_packetfilter(self, context, packetfilter):
LOG.info(_LI("delete_packetfilter: Calling central's"
"delete_packetfilter."))
response = self.agent.delete_packetfilter(context, packetfilter)
return response
def get_packetfilter(self, context, packetfilter):
LOG.info(_LI("get_packetfilter: Calling central's get_packetfilter."))
response = self.agent.get_packetfilter(context, packetfilter)
return response
def get_all_packetfilters(self, context, packetfilter):
LOG.info(_LI("get_all_packetfilters: Calling central's"
"get_all_packetfilters."))
response = self.agent.get_all_packetfilters(context, packetfilter)
return response
def create_vrf(self, context, vrf):
LOG.info(_LI("create_vrf: Calling central's create_vrf."))
response = self.agent.create_vrf(context, vrf)
return response
def del_vrf(self, context, vrf):
LOG.info(_LI("del_vrf: Calling central's del_vrf."))
response = self.agent.del_vrf(context, vrf)
return response
def get_vrf(self, context, vrf):
LOG.info(_LI("get_vrf: Calling central's get_vrf."))
response = self.agent.get_vrf(context, vrf)
return response
def get_vrfs(self, context, vrf):
LOG.info(_LI("get_vrfs: Calling central's get_vrfs."))
response = self.agent.get_vrfs(context, vrf)
return response
def create_snat(self, context, snat):
LOG.info(_LI("create_snat: Calling central's create_snat."))
response = self.agent.create_snat(context, snat)
return response
def del_snat(self, context, snat):
LOG.info(_LI("del_snat: Calling central's del_snat."))
response = self.agent.del_snat(context, snat)
return response
def get_snat(self, context, snat):
LOG.info(_LI("get_snat: Calling central's get_snat."))
response = self.agent.get_snat(context, snat)
return response
def get_snats(self, context, snat):
LOG.info(_LI("get_snats: Calling central's get_snats."))
response = self.agent.get_snats(context, snat)
return response
def create_securityZone(self, context, securityzone):
LOG.info(_LI("create_securityZone: Calling central's"
"create_securityZone."))
response = self.agent.create_securityZone(context, securityzone)
return response
def securityZone_addif(self, context, securityzone):
LOG.info(_LI("securityZone_addif: Calling central's"
"securityZone_addif."))
response = self.agent.securityZone_addif(context, securityzone)
return response
def securityZone_delif(self, context, securityzone):
LOG.info(_LI("securityZone_delif: Calling central's"
"securityZone_delif."))
response = self.agent.securityZone_delif(context, securityzone)
return response
def del_securityZone(self, context, securityzone):
LOG.info(_LI("del_securityZone: Calling central's del_securityZone."))
response = self.agent.del_securityZone(context, securityzone)
return response
def get_securityZone(self, context, securityzone):
LOG.info(_LI("get_securityZone: Calling central's"
"get_securityZone."))
response = self.agent.get_securityZone(context, securityzone)
return response
def get_securityZones(self, context, securityzone):
LOG.info(_LI("get_securityZones: Calling central's"
"get_securityZones."))
response = self.agent.get_securityZones(context, securityzone)
return response
def create_staticnat(self, context, staticnat):
LOG.info(_LI("create_staticnat: Calling central's create_staticnat."))
response = self.agent.create_staticnat(context, staticnat)
return response
def del_staticnat(self, context, staticnat):
LOG.info(_LI("del_staticnat: Calling central's del_staticnat."))
response = self.agent.del_staticnat(context, staticnat)
return response
def get_staticnat(self, context, staticnat):
LOG.info(_LI("get_staticnat: Calling central's get_staticnat."))
response = self.agent.get_staticnat(context, staticnat)
return response
def get_staticnats(self, context, staticnat):
LOG.info(_LI("get_staticnats: Calling central's get_staticnats."))
response = self.agent.get_staticnats(context, staticnat)
return response
class AgentService(service.RPCService, service.Service):
"""
Used for handling device-agent requests and validating
request parameters
"""
RPC_API_VERSION = '1.0'
def __init__(self, topic='check_agent_heartbeat', threads=None):
self.rpc_topic = topic
super(AgentService, self).__init__(threads=threads)
@property
def service_name(self):
return self.rpc_topic
def start(self):
super(AgentService, self).start()
def stop(self):
super(AgentService, self).stop()
def report_agent_state(self, context, agent_info):
LOG.info(_LI("updating agent state: Replying rpc client's "
"report_agent_state."))
agent_obj = objects.Agent(context, **agent_info)
# Check whether the target agent object already exists in the DB
conditions = {}
conditions['dc_name'] = agent_info['dc_name']
conditions['network_zone'] = agent_info['network_zone']
conditions['agent_ip'] = agent_info['agent_ip']
conditions['agent_nat_ip'] = agent_info['agent_nat_ip']
conditions['agent_type'] = agent_info['agent_type']
conditions['deleted'] = False
target_agent = None
try:
target_agent = agent_obj.get_object(context, **conditions)
except Exception:
LOG.info(_LI('cannot find a related agent record in the DB, so treat '
'this agent info as new and save it to the DB'))
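# Either refresh the heartbeat fields on the existing agent record,
# or create a brand-new record from the reported agent info.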
if target_agent:
update_agent = {}
update_agent['update_time'] = timeutils.utcnow()
update_agent['availiable'] = 'yes'
update_agent['status'] = 'OK'
update_infos = objects.Agent(context, **update_agent)
agent_obj.update(context, target_agent['id'],
update_infos.as_dict())
else:
agent_obj.availiable = 'yes'
agent_obj.status = 'OK'
agent_obj.update_time = timeutils.utcnow()
agent_obj.create(context, agent_obj.as_dict())
return agent_obj
class CLIService(service.RPCService, service.Service):
"""
Used for handling command-line interface requests and validating
request parameters
"""
RPC_API_VERSION = '1.0'
def __init__(self, topic='cli_manager', agentinfo=None, threads=None):
if agentinfo:
self.rpc_topic = '%s.%s' % (topic, agentinfo['agent_ip'])
else:
self.rpc_topic = topic
super(CLIService, self).__init__(threads=threads)
self.agent = agent.get_cli_backend()
@property
def service_name(self):
return self.rpc_topic
def start(self):
super(CLIService, self).start()
def stop(self):
super(CLIService, self).stop()
def execute_commands(self, context, req):
cli_client = self.agent.sshClient(**req)
commands = req['commands']
response = cli_client.send(commands)
return response
|
|
##########################################################################
#
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from d3d9 import *
REFERENCE_TIME = Alias("REFERENCE_TIME", LONGLONG)
DXVA2_ProcAmp = Flags(UINT, [
"DXVA2_ProcAmp_None",
"DXVA2_ProcAmp_Brightness",
"DXVA2_ProcAmp_Contrast",
"DXVA2_ProcAmp_Hue",
"DXVA2_ProcAmp_Saturation",
])
HRESULT = FakeEnum(HRESULT, [
"DXVA2_E_NOT_INITIALIZED",
"DXVA2_E_NEW_VIDEO_DEVICE",
"DXVA2_E_VIDEO_DEVICE_LOCKED",
"DXVA2_E_NOT_AVAILABLE",
])
DXVA2_SampleFormat = Enum("DXVA2_SampleFormat", [
"DXVA2_SampleFormatMask",
"DXVA2_SampleUnknown",
"DXVA2_SampleProgressiveFrame",
"DXVA2_SampleFieldInterleavedEvenFirst",
"DXVA2_SampleFieldInterleavedOddFirst",
"DXVA2_SampleFieldSingleEven",
"DXVA2_SampleFieldSingleOdd",
"DXVA2_SampleSubStream",
])
DXVA2_VideoChromaSubSampling = Enum("DXVA2_VideoChromaSubSampling", [
"DXVA2_VideoChromaSubsamplingMask",
"DXVA2_VideoChromaSubsampling_Unknown",
"DXVA2_VideoChromaSubsampling_ProgressiveChroma",
"DXVA2_VideoChromaSubsampling_Horizontally_Cosited",
"DXVA2_VideoChromaSubsampling_Vertically_Cosited",
"DXVA2_VideoChromaSubsampling_Vertically_AlignedChromaPlanes",
"DXVA2_VideoChromaSubsampling_MPEG2",
"DXVA2_VideoChromaSubsampling_MPEG1",
"DXVA2_VideoChromaSubsampling_DV_PAL",
"DXVA2_VideoChromaSubsampling_Cosited",
])
DXVA2_NominalRange = Enum("DXVA2_NominalRange", [
"DXVA2_NominalRangeMask",
"DXVA2_NominalRange_Unknown",
"DXVA2_NominalRange_Normal",
"DXVA2_NominalRange_Wide",
"DXVA2_NominalRange_0_255",
"DXVA2_NominalRange_16_235",
"DXVA2_NominalRange_48_208",
])
DXVA2_VideoLighting = Enum("DXVA2_VideoLighting", [
"DXVA2_VideoLightingMask",
"DXVA2_VideoLighting_Unknown",
"DXVA2_VideoLighting_bright",
"DXVA2_VideoLighting_office",
"DXVA2_VideoLighting_dim",
"DXVA2_VideoLighting_dark",
])
DXVA2_VideoPrimaries = Enum("DXVA2_VideoPrimaries", [
"DXVA2_VideoPrimariesMask",
"DXVA2_VideoPrimaries_Unknown",
"DXVA2_VideoPrimaries_reserved",
"DXVA2_VideoPrimaries_BT709",
"DXVA2_VideoPrimaries_BT470_2_SysM",
"DXVA2_VideoPrimaries_BT470_2_SysBG",
"DXVA2_VideoPrimaries_SMPTE170M",
"DXVA2_VideoPrimaries_SMPTE240M",
"DXVA2_VideoPrimaries_EBU3213",
"DXVA2_VideoPrimaries_SMPTE_C",
])
DXVA2_VideoTransferFunction = Enum("DXVA2_VideoTransferFunction", [
"DXVA2_VideoTransFuncMask",
"DXVA2_VideoTransFunc_Unknown",
"DXVA2_VideoTransFunc_10",
"DXVA2_VideoTransFunc_18",
"DXVA2_VideoTransFunc_20",
"DXVA2_VideoTransFunc_22",
"DXVA2_VideoTransFunc_709",
"DXVA2_VideoTransFunc_240M",
"DXVA2_VideoTransFunc_sRGB",
"DXVA2_VideoTransFunc_28",
])
DXVA2_SurfaceType = FakeEnum(DWORD, [
"DXVA2_SurfaceType_DecoderRenderTarget",
"DXVA2_SurfaceType_ProcessorRenderTarget",
"DXVA2_SurfaceType_D3DRenderTargetTexture",
])
DXVA2_VideoTransferMatrix = Enum("DXVA2_VideoTransferMatrix", [
"DXVA2_VideoTransferMatrixMask",
"DXVA2_VideoTransferMatrix_Unknown",
"DXVA2_VideoTransferMatrix_BT709",
"DXVA2_VideoTransferMatrix_BT601",
"DXVA2_VideoTransferMatrix_SMPTE240M",
])
DXVA2_AYUVSample16 = Struct("DXVA2_AYUVSample16", [
(USHORT, "Cr"),
(USHORT, "Cb"),
(USHORT, "Y"),
(USHORT, "Alpha"),
])
DXVA2_AYUVSample8 = Struct("DXVA2_AYUVSample8", [
(UCHAR, "Cr"),
(UCHAR, "Cb"),
(UCHAR, "Y"),
(UCHAR, "Alpha"),
])
DXVA2_ConfigPictureDecode = Struct("DXVA2_ConfigPictureDecode", [
(GUID, "guidConfigBitstreamEncryption"),
(GUID, "guidConfigMBcontrolEncryption"),
(GUID, "guidConfigResidDiffEncryption"),
(UINT, "ConfigBitstreamRaw"),
(UINT, "ConfigMBcontrolRasterOrder"),
(UINT, "ConfigResidDiffHost"),
(UINT, "ConfigSpatialResid8"),
(UINT, "ConfigResid8Subtraction"),
(UINT, "ConfigSpatialHost8or9Clipping"),
(UINT, "ConfigSpatialResidInterleaved"),
(UINT, "ConfigIntraResidUnsigned"),
(UINT, "ConfigResidDiffAccelerator"),
(UINT, "ConfigHostInverseScan"),
(UINT, "ConfigSpecificIDCT"),
(UINT, "Config4GroupedCoefs"),
(USHORT, "ConfigMinRenderTargetBuffCount"),
(USHORT, "ConfigDecoderSpecific"),
])
DXVA2_DecodeBufferDesc = Struct("DXVA2_DecodeBufferDesc", [
(DWORD, "CompressedBufferType"),
(UINT, "BufferIndex"),
(UINT, "DataOffset"),
(UINT, "DataSize"),
(UINT, "FirstMBaddress"),
(UINT, "NumMBsInBuffer"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Stride"),
(UINT, "ReservedBits"),
(PVOID, "pvPVPState"),
])
DXVA2_DecodeExtensionData = Struct("DXVA2_DecodeExtensionData", [
(UINT, "Function"),
(PVOID, "pPrivateInputData"),
(UINT, "PrivateInputDataSize"),
(PVOID, "pPrivateOutputData"),
(UINT, "PrivateOutputDataSize"),
])
DXVA2_DecodeExecuteParams = Struct("DXVA2_DecodeExecuteParams", [
(UINT, "NumCompBuffers"),
(Array(DXVA2_DecodeBufferDesc, "{self}.NumCompBuffers"), "pCompressedBuffers"),
(Pointer(DXVA2_DecodeExtensionData), "pExtensionData"),
])
DXVA2_ExtendedFormat = Struct("DXVA2_ExtendedFormat", [
(UINT, "value"),
])
DXVA2_Fixed32 = Struct("DXVA2_Fixed32", [
(USHORT, "Fraction"),
(SHORT, "Value"),
])
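# DXVA2_Fixed32 is a 16.16 fixed-point value: "Value" holds the integer part
# and "Fraction" the fractional part (cf. DXVA2FloatToFixed in dxva2api.h).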
DXVA2_FilterValues = Struct("DXVA2_FilterValues", [
(DXVA2_Fixed32, "Level"),
(DXVA2_Fixed32, "Threshold"),
(DXVA2_Fixed32, "Radius"),
])
DXVA2_Frequency = Struct("DXVA2_Frequency", [
(UINT, "Numerator"),
(UINT, "Denominator"),
])
DXVA2_ProcAmpValues = Struct("DXVA2_ProcAmpValues", [
(DXVA2_Fixed32, "Brightness"),
(DXVA2_Fixed32, "Contrast"),
(DXVA2_Fixed32, "Hue"),
(DXVA2_Fixed32, "Saturation"),
])
DXVA2_ValueRange = Struct("DXVA2_ValueRange", [
(DXVA2_Fixed32, "MinValue"),
(DXVA2_Fixed32, "MaxValue"),
(DXVA2_Fixed32, "DefaultValue"),
(DXVA2_Fixed32, "StepSize"),
])
DXVA2_VideoDesc = Struct("DXVA2_VideoDesc", [
(UINT, "SampleWidth"),
(UINT, "SampleHeight"),
(DXVA2_ExtendedFormat, "SampleFormat"),
(D3DFORMAT, "Format"),
(DXVA2_Frequency, "InputSampleFreq"),
(DXVA2_Frequency, "OutputFrameFreq"),
(UINT, "UABProtectionLevel"),
(UINT, "Reserved"),
])
DXVA2_VideoProcessBltParams = Struct("DXVA2_VideoProcessBltParams", [
(REFERENCE_TIME, "TargetFrame"),
(RECT, "TargetRect"),
(SIZE, "ConstrictionSize"),
(UINT, "StreamingFlags"),
(DXVA2_AYUVSample16, "BackgroundColor"),
(DXVA2_ExtendedFormat, "DestFormat"),
(DXVA2_ProcAmpValues, "ProcAmpValues"),
(DXVA2_Fixed32, "Alpha"),
(DXVA2_FilterValues, "NoiseFilterLuma"),
(DXVA2_FilterValues, "NoiseFilterChroma"),
(DXVA2_FilterValues, "DetailFilterLuma"),
(DXVA2_FilterValues, "DetailFilterChroma"),
(DWORD, "DestData"),
])
DXVA2_VideoProcessorCaps = Struct("DXVA2_VideoProcessorCaps", [
(UINT, "DeviceCaps"),
(D3DPOOL, "InputPool"),
(UINT, "NumForwardRefSamples"),
(UINT, "NumBackwardRefSamples"),
(UINT, "Reserved"),
(UINT, "DeinterlaceTechnology"),
(UINT, "ProcAmpControlCaps"),
(UINT, "VideoProcessorOperations"),
(UINT, "NoiseFilterTechnology"),
(UINT, "DetailFilterTechnology"),
])
# See also DXVADDI_PVP_KEY128
DXVA2_PVP_KEY128 = Struct('DXVA2_PVP_KEY128', [
(Array(BYTE, 16), 'Data'),
])
# See also DXVADDI_PVP_SETKEY
DXVA2_PVP_SETKEY = Struct('DXVA2_PVP_SETKEY', [
(DXVA2_PVP_KEY128, 'ContentKey'),
])
DXVA2_DECODEBUFFERDESC = Struct("DXVA2_DECODEBUFFERDESC", [
(ObjPointer(IDirect3DSurface9), "pRenderTarget"),
(DWORD, "CompressedBufferType"),
(DWORD, "BufferIndex"),
(DWORD, "DataOffset"),
(DWORD, "DataSize"),
(DWORD, "FirstMBaddress"),
(DWORD, "NumMBsInBuffer"),
(DWORD, "Width"),
(DWORD, "Height"),
(DWORD, "Stride"),
(DWORD, "ReservedBits"),
(PVOID, "pCipherCounter"),
])
DXVA2_DECODEEXECUTE = Struct("DXVA2_DECODEEXECUTE", [
(UINT, "NumCompBuffers"),
(Array(DXVA2_DECODEBUFFERDESC, "{self}.NumCompBuffers"), "pCompressedBuffers"),
])
DXVA2_VIDEOSAMPLE = Struct("DXVA2_VIDEOSAMPLE", [
(REFERENCE_TIME, "Start"),
(REFERENCE_TIME, "End"),
(DXVA2_ExtendedFormat, "SampleFormat"),
(DWORD, "SampleFlags"),
(ObjPointer(IDirect3DSurface9), "SrcSurface"),
(RECT, "SrcRect"),
(RECT, "DstRect"),
#(Array(DXVA2_AYUVSample8, 16), "Pal"),
(DXVA2_Fixed32, "PlanarAlpha"),
])
DXVA2_VIDEOPROCESSBLT = Struct("DXVA2_VIDEOPROCESSBLT", [
(REFERENCE_TIME, "TargetFrame"),
(RECT, "TargetRect"),
(SIZE, "ConstrictionSize"),
(DWORD, "StreamingFlags"),
(DXVA2_AYUVSample16, "BackgroundColor"),
(DXVA2_ExtendedFormat, "DestFormat"),
(DWORD, "DestFlags"),
(DXVA2_ProcAmpValues, "ProcAmpValues"),
(DXVA2_Fixed32, "Alpha"),
(DXVA2_FilterValues, "NoiseFilterLuma"),
(DXVA2_FilterValues, "NoiseFilterChroma"),
(DXVA2_FilterValues, "DetailFilterLuma"),
(DXVA2_FilterValues, "DetailFilterChroma"),
(Array(DXVA2_VIDEOSAMPLE, "{self}.NumSrcSurfaces"), "pSrcSurfaces"),
(UINT, "NumSrcSurfaces"),
])
DXVA2_EXTENSIONEXECUTE = Opaque('DXVA2_EXTENSIONEXECUTE')
DXVA2_DECODEBUFFERINFO = Opaque('DXVA2_DECODEBUFFERINFO')
IDirect3DDecodeDevice9 = Interface("IDirect3DDecodeDevice9", IUnknown)
IDirect3DDecodeDevice9.methods += [
StdMethod(HRESULT, "DecodeBeginFrame", [(Pointer(DXVA2_PVP_SETKEY), "pPVPSetKey")]),
StdMethod(HRESULT, "DecodeEndFrame", [(Pointer(HANDLE), "pHandleComplete")]),
StdMethod(HRESULT, "DecodeSetRenderTarget", [(ObjPointer(IDirect3DSurface9), "pRenderTarget")]),
StdMethod(HRESULT, "DecodeExecute", [(Pointer(DXVA2_DECODEEXECUTE), "pExecuteParams")]),
]
IDirect3DVideoProcessDevice9 = Interface("IDirect3DVideoProcessDevice9", IUnknown)
IDirect3DVideoProcessDevice9.methods += [
StdMethod(HRESULT, "VideoProcessBeginFrame", []),
StdMethod(HRESULT, "VideoProcessEndFrame", [(Pointer(HANDLE), "pHandleComplete")]),
StdMethod(HRESULT, "VideoProcessSetRenderTarget", [(ObjPointer(IDirect3DSurface9), "pRenderTarget")]),
StdMethod(HRESULT, "VideoProcessBlt", [(Pointer(DXVA2_VIDEOPROCESSBLT), "pData")]),
]
IDirect3DDXVAExtensionDevice9 = Interface("IDirect3DDXVAExtensionDevice9", IUnknown)
IDirect3DDXVAExtensionDevice9.methods += [
StdMethod(HRESULT, "ExtensionExecute", [(OpaquePointer(DXVA2_EXTENSIONEXECUTE), "pData")]),
]
IDirect3DDxva2Container9 = Interface("IDirect3DDxva2Container9", IUnknown)
IDirect3DDxva2Container9.methods += [
StdMethod(HRESULT, "CreateSurface", [(UINT, "Width"), (UINT, "Height"), (UINT, "BackBuffers"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), (DWORD, "Usage"), (DXVA2_SurfaceType, "DxvaType"), Out(Array(ObjPointer(IDirect3DSurface9), "1 + BackBuffers"), "ppSurface"), (Pointer(HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "VidToSysBlt", [(ObjPointer(IDirect3DSurface9), "pSourceSurface"), (Pointer(RECT), "pSourceRect"), (ObjPointer(IDirect3DSurface9), "pDestSurface"), (Pointer(RECT), "pDestRect")]),
StdMethod(HRESULT, "GetDecodeGuidCount", [Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetDecodeGuids", [(UINT, "Count"), Out(Array(GUID, "Count"), "pGuids")], sideeffects=False),
StdMethod(HRESULT, "GetDecodeRenderTargetFormatCount", [(REFGUID, "Guid"), Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetDecodeRenderTargets", [(REFGUID, "Guid"), (UINT, "Count"), Out(Array(D3DFORMAT, "Count"), "pFormats")], sideeffects=False),
StdMethod(HRESULT, "GetDecodeCompressedBufferCount", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetDecodeCompressedBuffers", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (UINT, "Count"), Out(OpaquePointer(DXVA2_DECODEBUFFERINFO), "pBufferInfo")], sideeffects=False),
StdMethod(HRESULT, "GetDecodeConfigurationCount", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetDecodeConfigurations", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (UINT, "Count"), Out(Array(DXVA2_ConfigPictureDecode, "Count"), "pConfigs")], sideeffects=False),
StdMethod(HRESULT, "CreateDecodeDevice", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (Pointer(Const(DXVA2_ConfigPictureDecode)), "pConfig"), (Array(ObjPointer(IDirect3DSurface9), "NumSurfaces"), "ppDecoderRenderTargets"), (UINT, "NumSurfaces"), Out(Pointer(ObjPointer(IDirect3DDecodeDevice9)), "ppDecode")]),
StdMethod(HRESULT, "GetVideoProcessorDeviceGuidCount", [(Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorDeviceGuids", [(Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (UINT, "Count"), Out(Pointer(GUID), "pGuids")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorCaps", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "Format"), Out(Pointer(DXVA2_VideoProcessorCaps), "pCaps")], sideeffects=False),
StdMethod(HRESULT, "GetProcAmpRange", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "Format"), (UINT, "ProcAmpCap"), Out(Pointer(DXVA2_ValueRange), "pRange")]),
StdMethod(HRESULT, "GetFilterPropertyRange", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "Format"), (UINT, "FilterSetting"), Out(Pointer(DXVA2_ValueRange), "pRange")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorRenderTargetCount", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorRenderTargets", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (UINT, "Count"), Out(Array(D3DFORMAT, "Count"), "pFormats")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorSubStreamFormatCount", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "Format"), Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetVideoProcessorSubStreamFormats", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "Format"), (UINT, "Count"), Out(Array(D3DFORMAT, "Count"), "pFormats")], sideeffects=False),
StdMethod(HRESULT, "CreateVideoProcessDevice", [(REFGUID, "Guid"), (Pointer(Const(DXVA2_VideoDesc)), "pVideoDesc"), (D3DFORMAT, "Format"), (UINT, "MaxSubStreams"), Out(Pointer(ObjPointer(IDirect3DVideoProcessDevice9)), "ppVideoProcessDevice")]),
StdMethod(HRESULT, "GetExtensionGuidCount", [(DWORD, "Extension"), Out(Pointer(UINT), "pCount")], sideeffects=False),
StdMethod(HRESULT, "GetExtensionGuids", [(DWORD, "Extension"), (UINT, "Count"), Out(Array(GUID, "Count"), "pGuids")], sideeffects=False),
StdMethod(HRESULT, "GetExtensionCaps", [(REFGUID, "Guid"), (UINT, "arg2"), (OpaquePointer(Void), "arg3"), (UINT, "arg4"), (OpaquePointer(Void), "arg5"), (UINT, "arg6")], sideeffects=False),
StdMethod(HRESULT, "CreateExtensionDevice", [(REFGUID, "Guid"), (OpaquePointer(Void), "arg2"), (UINT, "arg3"), Out(Pointer(ObjPointer(IDirect3DDXVAExtensionDevice9)), "ppExtension")]),
]
d3d9.addInterfaces([
IDirect3DDxva2Container9,
])
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A module to use service stubs for testing.
To test applications which use App Engine services such as the
datastore, developers can use the available stub
implementations. Service stubs behave like the original service
without permanent side effects. The datastore stub, for example, allows you
to write entities into memory without storing them to the actual
datastore. This module makes using those stubs for testing easier.
Here is a basic example:
'''
import unittest
from google.appengine.ext import db
from google.appengine.ext import testbed
class TestModel(db.Model):
number = db.IntegerProperty(default=42)
class MyTestCase(unittest.TestCase):
def setUp(self):
# At first, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed which will prepare the usage of service stubs.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
# Never forget to deactivate the testbed once the tests are
# completed. Otherwise the original stubs will not be restored.
self.testbed.deactivate()
def testInsertEntity(self):
# Because we use the datastore stub, this put() does not have
# permanent side effects.
TestModel().put()
fetched_entities = TestModel.all().fetch(2)
self.assertEqual(1, len(fetched_entities))
self.assertEqual(42, fetched_entities[0].number)
'''
Enable stubs and disable services
---------------------------------
This module allows you to use stubs for the following services:
- capability_service
- channel
- datastore_v3 (aka datastore)
- images (only for dev_appserver)
- mail (only for dev_appserver)
- memcache
- taskqueue
- urlfetch
- user
- xmpp
To use a particular service stub, call self.init_SERVICENAME_stub().
This will replace calls to the service with calls to the service
stub. If you want to disable any calls to a particular service, call
self.init_SERVICENAME_stub(enable=False). This can be useful if you
want to test code that must not use a certain service.
Environment variables
---------------------
App Engine service stubs often depend on environment variables. For
example, the datastore stub uses os.environ['APPLICATION_ID'] to store
entities linked to a particular app. testbed will use default values
if nothing else is provided, but you can change those with
self.setup_env().
"""
import os
import unittest
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
try:
from google.appengine.api import mail_stub
except AttributeError:
mail_stub = None
from google.appengine.api import request_info
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import dict_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
try:
from google.appengine.api.images import images_stub
except ImportError:
images_stub = None
try:
from google.appengine.api.logservice import logservice_stub
except ImportError:
logservice_stub = None
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.modules import modules_stub
try:
from google.appengine.api.search import simple_search_stub
except ImportError:
simple_search_stub = None
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.xmpp import xmpp_service_stub
try:
from google.appengine.datastore import datastore_sqlite_stub
except ImportError:
datastore_sqlite_stub = None
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_stub
from google.appengine.ext.cloudstorage import common as gcs_common
from google.appengine.ext.cloudstorage import stub_dispatcher as gcs_dispatcher
DEFAULT_ENVIRONMENT = {
'APPLICATION_ID': 'testbed-test',
'AUTH_DOMAIN': 'gmail.com',
'HTTP_HOST': 'testbed.example.com',
'CURRENT_MODULE_ID': 'default',
'CURRENT_VERSION_ID': 'testbed-version',
'REQUEST_ID_HASH': 'testbed-request-id-hash',
'REQUEST_LOG_ID': '7357B3D7091D',
'SERVER_NAME': 'testbed.example.com',
'SERVER_SOFTWARE': 'Development/1.0 (testbed)',
'SERVER_PORT': '80',
'USER_EMAIL': '',
'USER_ID': '',
}
# Deprecated legacy aliases for default environment variables. New code
# should use the corresponding entries in DEFAULT_ENVIRONMENT instead.
DEFAULT_APP_ID = DEFAULT_ENVIRONMENT['APPLICATION_ID']
DEFAULT_AUTH_DOMAIN = DEFAULT_ENVIRONMENT['AUTH_DOMAIN']
DEFAULT_SERVER_NAME = DEFAULT_ENVIRONMENT['SERVER_NAME']
DEFAULT_SERVER_SOFTWARE = DEFAULT_ENVIRONMENT['SERVER_SOFTWARE']
DEFAULT_SERVER_PORT = DEFAULT_ENVIRONMENT['SERVER_PORT']
APP_IDENTITY_SERVICE_NAME = 'app_identity_service'
BLOBSTORE_SERVICE_NAME = 'blobstore'
CAPABILITY_SERVICE_NAME = 'capability_service'
CHANNEL_SERVICE_NAME = 'channel'
DATASTORE_SERVICE_NAME = 'datastore_v3'
FILES_SERVICE_NAME = 'file'
IMAGES_SERVICE_NAME = 'images'
LOG_SERVICE_NAME = 'logservice'
MAIL_SERVICE_NAME = 'mail'
MEMCACHE_SERVICE_NAME = 'memcache'
TASKQUEUE_SERVICE_NAME = 'taskqueue'
URLFETCH_SERVICE_NAME = 'urlfetch'
USER_SERVICE_NAME = 'user'
XMPP_SERVICE_NAME = 'xmpp'
SEARCH_SERVICE_NAME = 'search'
MODULES_SERVICE_NAME = 'modules'
INIT_STUB_METHOD_NAMES = {
APP_IDENTITY_SERVICE_NAME: 'init_app_identity_stub',
BLOBSTORE_SERVICE_NAME: 'init_blobstore_stub',
CAPABILITY_SERVICE_NAME: 'init_capability_stub',
CHANNEL_SERVICE_NAME: 'init_channel_stub',
DATASTORE_SERVICE_NAME: 'init_datastore_v3_stub',
FILES_SERVICE_NAME: 'init_files_stub',
IMAGES_SERVICE_NAME: 'init_images_stub',
LOG_SERVICE_NAME: 'init_logservice_stub',
MAIL_SERVICE_NAME: 'init_mail_stub',
MEMCACHE_SERVICE_NAME: 'init_memcache_stub',
TASKQUEUE_SERVICE_NAME: 'init_taskqueue_stub',
URLFETCH_SERVICE_NAME: 'init_urlfetch_stub',
USER_SERVICE_NAME: 'init_user_stub',
XMPP_SERVICE_NAME: 'init_xmpp_stub',
SEARCH_SERVICE_NAME: 'init_search_stub',
MODULES_SERVICE_NAME: 'init_modules_stub'
}
SUPPORTED_SERVICES = sorted(INIT_STUB_METHOD_NAMES)
AUTO_ID_POLICY_SEQUENTIAL = datastore_stub_util.SEQUENTIAL
AUTO_ID_POLICY_SCATTERED = datastore_stub_util.SCATTERED
def urlfetch_to_gcs_stub(url, payload, method, headers, request, response,
follow_redirects=False, deadline=None,
validate_certificate=None):
"""Forwards gcs urlfetch requests to gcs_dispatcher."""
headers_map = dict(
(header.key().lower(), header.value()) for header in headers)
result = gcs_dispatcher.dispatch(method, headers_map, url, payload)
response.set_statuscode(result.status_code)
response.set_content(result.content[:urlfetch_stub.MAX_RESPONSE_SIZE])
for k, v in result.headers.iteritems():
if k.lower() == 'content-length' and method != 'HEAD':
v = len(response.content())
header_proto = response.add_header()
header_proto.set_key(k)
header_proto.set_value(str(v))
if len(result.content) > urlfetch_stub.MAX_RESPONSE_SIZE:
response.set_contentwastruncated(True)
def urlmatcher_for_gcs_stub(url):
"""Determines whether a url should be handled by gcs stub."""
return url.startswith(gcs_common.local_api_url())
GCS_URLMATCHERS_TO_FETCH_FUNCTIONS = [
(urlmatcher_for_gcs_stub, urlfetch_to_gcs_stub)]
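# init_urlfetch_stub() below passes this list to URLFetchServiceStub, so any
# request whose URL matches the local GCS API prefix is handled by
# gcs_dispatcher instead of being fetched over the network.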
class Error(Exception):
"""Base testbed error type."""
class NotActivatedError(Error):
"""Raised if the used testbed instance is not activated."""
class StubNotSupportedError(Error):
"""Raised if an unsupported service stub is accessed."""
class Testbed(object):
"""Class providing APIs to manipulate stubs for testing.
This class allows you to replace App Engine services with fake stub
implementations. These stubs act like the actual APIs but do not
invoke the replaced services.
In order to use a fake service stub or disable a real service,
invoke the corresponding 'init_*_stub' methods of this class.
"""
def __init__(self):
self._activated = False
self._enabled_stubs = {}
self._blob_storage = None
def activate(self):
"""Activate the testbed.
Invoking this method will also assign default values to
environment variables required by App Engine services such as
os.environ['APPLICATION_ID']. You can set custom values with
setup_env().
"""
self._orig_env = dict(os.environ)
self.setup_env()
self._original_stub_map = apiproxy_stub_map.apiproxy
self._test_stub_map = apiproxy_stub_map.APIProxyStubMap()
internal_map = self._original_stub_map._APIProxyStubMap__stub_map
self._test_stub_map._APIProxyStubMap__stub_map = dict(internal_map)
apiproxy_stub_map.apiproxy = self._test_stub_map
self._activated = True
def deactivate(self):
"""Deactivate the testbed.
This method will restore the API proxy and environment variables to the
state before activate() was called.
Raises:
NotActivatedError: If called before activate() was called.
"""
if not self._activated:
raise NotActivatedError('The testbed is not activated.')
for service_name, deactivate_callback in self._enabled_stubs.iteritems():
if deactivate_callback:
deactivate_callback(self._test_stub_map.GetStub(service_name))
apiproxy_stub_map.apiproxy = self._original_stub_map
self._enabled_stubs = {}
os.environ.clear()
os.environ.update(self._orig_env)
self._blob_storage = None
self._activated = False
def setup_env(self, overwrite=False, **kwargs):
"""Set up environment variables.
Sets default and custom environment variables. By default, all the items in
DEFAULT_ENVIRONMENT will be created without being specified. To set a value
other than the default, or to pass a custom environment variable, pass a
corresponding keyword argument:
testbed_instance.setup_env() # All defaults.
testbed_instance.setup_env(auth_domain='custom') # All defaults, overriding
# AUTH_DOMAIN.
testbed_instance.setup_env(custom='foo') # All defaults, plus a custom
# os.environ['CUSTOM'] = 'foo'.
To overwrite values set by a previous invocation, pass overwrite=True. This
will not result in an OVERWRITE entry in os.environ.
Args:
overwrite: boolean. Whether to overwrite items with corresponding entries
in os.environ.
**kwargs: environment variables to set. The name of the argument will be
uppercased and used as a key in os.environ.
"""
merged_kwargs = {}
for key, value in kwargs.iteritems():
if key == 'app_id':
key = 'APPLICATION_ID'
merged_kwargs[key.upper()] = value
if not overwrite:
for key, value in DEFAULT_ENVIRONMENT.iteritems():
if key not in merged_kwargs:
merged_kwargs[key] = value
for key, value in merged_kwargs.iteritems():
if overwrite or key not in os.environ:
os.environ[key] = value
def _register_stub(self, service_name, stub, deactivate_callback=None):
"""Register a service stub.
Args:
service_name: The name of the service the stub represents.
stub: The stub.
deactivate_callback: An optional function to call when deactivating the
stub. Must accept the stub as the only argument.
Raises:
NotActivatedError: The testbed is not activated.
"""
self._disable_stub(service_name)
self._test_stub_map.RegisterStub(service_name, stub)
self._enabled_stubs[service_name] = deactivate_callback
def _disable_stub(self, service_name):
"""Disable a service stub.
Args:
service_name: The name of the service to disable.
Raises:
NotActivatedError: The testbed is not activated.
"""
if not self._activated:
raise NotActivatedError('The testbed is not activated.')
deactivate_callback = self._enabled_stubs.pop(service_name, None)
if deactivate_callback:
deactivate_callback(self._test_stub_map.GetStub(service_name))
if service_name in self._test_stub_map._APIProxyStubMap__stub_map:
del self._test_stub_map._APIProxyStubMap__stub_map[service_name]
def get_stub(self, service_name):
"""Get the stub for a service.
Args:
service_name: The name of the service.
Returns:
The stub for 'service_name', or None if the stub has not been enabled.
Raises:
NotActivatedError: The testbed is not activated.
StubNotSupportedError: The service is not supported by testbed.
"""
if not self._activated:
raise NotActivatedError('The testbed is not activated.')
if service_name not in SUPPORTED_SERVICES:
msg = 'The "%s" service is not supported by testbed' % service_name
raise StubNotSupportedError(msg)
if service_name not in self._enabled_stubs:
return None
return self._test_stub_map.GetStub(service_name)
def init_app_identity_stub(self, enable=True):
"""Enable the app identity stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(APP_IDENTITY_SERVICE_NAME)
return
stub = app_identity_stub.AppIdentityServiceStub()
self._register_stub(APP_IDENTITY_SERVICE_NAME, stub)
def _get_blob_storage(self):
"""Creates a blob storage for stubs if needed."""
if self._blob_storage is None:
self._blob_storage = dict_blob_storage.DictBlobStorage()
return self._blob_storage
def init_blobstore_stub(self, enable=True):
"""Enable the blobstore stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(BLOBSTORE_SERVICE_NAME)
return
stub = blobstore_stub.BlobstoreServiceStub(self._get_blob_storage())
self._register_stub(BLOBSTORE_SERVICE_NAME, stub)
def init_capability_stub(self, enable=True):
"""Enable the capability stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(CAPABILITY_SERVICE_NAME)
return
stub = capability_stub.CapabilityServiceStub()
self._register_stub(CAPABILITY_SERVICE_NAME, stub)
def init_channel_stub(self, enable=True):
"""Enable the channel stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(CHANNEL_SERVICE_NAME)
return
stub = channel_service_stub.ChannelServiceStub()
self._register_stub(CHANNEL_SERVICE_NAME, stub)
def init_datastore_v3_stub(self, enable=True, datastore_file=None,
use_sqlite=False,
auto_id_policy=AUTO_ID_POLICY_SEQUENTIAL,
**stub_kw_args):
"""Enable the datastore stub.
The 'datastore_file' argument can be the path to an existing
datastore file, or None (default) to use an in-memory datastore
that is initially empty. If you use the sqlite stub and have
'datastore_file' defined, changes you apply in a test will be
written to the file. If you use the default datastore stub,
changes are _not_ saved to disk unless you set save_changes=True.
Note that you can only access those entities of the datastore file
which have the same application ID associated with them as the
test run. You can change the application ID for a test with
setup_env().
Args:
enable: True if the fake service should be enabled, False if real
service should be disabled.
datastore_file: Filename of a dev_appserver datastore file.
use_sqlite: True to use the Sqlite stub, False (default) for file stub.
auto_id_policy: How datastore stub assigns auto IDs. Either
AUTO_ID_POLICY_SEQUENTIAL or AUTO_ID_POLICY_SCATTERED.
stub_kw_args: Keyword arguments passed on to the service stub.
"""
if not enable:
self._disable_stub(DATASTORE_SERVICE_NAME)
self._disable_stub('datastore_v4')
return
if use_sqlite:
if datastore_sqlite_stub is None:
raise StubNotSupportedError(
'The sqlite stub is not supported in production.')
stub = datastore_sqlite_stub.DatastoreSqliteStub(
os.environ['APPLICATION_ID'],
datastore_file,
use_atexit=False,
auto_id_policy=auto_id_policy,
**stub_kw_args)
else:
stub_kw_args.setdefault('save_changes', False)
stub = datastore_file_stub.DatastoreFileStub(
os.environ['APPLICATION_ID'],
datastore_file,
use_atexit=False,
auto_id_policy=auto_id_policy,
**stub_kw_args)
self._register_stub(DATASTORE_SERVICE_NAME, stub,
self._deactivate_datastore_v3_stub)
v4_stub = datastore_v4_stub.DatastoreV4Stub(os.environ['APPLICATION_ID'])
self._register_stub('datastore_v4', v4_stub)
def _deactivate_datastore_v3_stub(self, stub):
stub.Write()
def init_files_stub(self, enable=True):
"""Enable files api stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(FILES_SERVICE_NAME)
return
stub = file_service_stub.FileServiceStub(self._get_blob_storage())
self._register_stub(FILES_SERVICE_NAME, stub)
def init_images_stub(self, enable=True, **stub_kwargs):
"""Enable the images stub.
The images service stub is only available in dev_appserver because
it uses the PIL library.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
stub_kwargs: Keyword arguments passed on to the service stub.
"""
if not enable:
self._disable_stub(IMAGES_SERVICE_NAME)
return
if images_stub is None:
msg = ('Could not initialize images API; you are likely '
'missing the Python "PIL" module.')
raise StubNotSupportedError(msg)
stub = images_stub.ImagesServiceStub(**stub_kwargs)
self._register_stub(IMAGES_SERVICE_NAME, stub)
def init_logservice_stub(self, enable=True):
"""Enable the log service stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
Raises:
StubNotSupportedError: The logservice stub is unavailable.
"""
if not enable:
self._disable_stub(LOG_SERVICE_NAME)
return
if logservice_stub is None:
raise StubNotSupportedError(
'The logservice stub is not supported in production.')
stub = logservice_stub.LogServiceStub()
self._register_stub(LOG_SERVICE_NAME, stub)
def init_mail_stub(self, enable=True, **stub_kw_args):
"""Enable the mail stub.
The email service stub is only available in dev_appserver because
it uses the subprocess module.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
stub_kw_args: Keyword arguments passed on to the service stub.
"""
if not enable:
self._disable_stub(MAIL_SERVICE_NAME)
return
stub = mail_stub.MailServiceStub(**stub_kw_args)
self._register_stub(MAIL_SERVICE_NAME, stub)
def init_memcache_stub(self, enable=True):
"""Enable the memcache stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(MEMCACHE_SERVICE_NAME)
return
stub = memcache_stub.MemcacheServiceStub()
self._register_stub(MEMCACHE_SERVICE_NAME, stub)
def init_taskqueue_stub(self, enable=True, **stub_kw_args):
"""Enable the taskqueue stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
stub_kw_args: Keyword arguments passed on to the service stub.
"""
if not enable:
self._disable_stub(TASKQUEUE_SERVICE_NAME)
return
stub = taskqueue_stub.TaskQueueServiceStub(**stub_kw_args)
self._register_stub(TASKQUEUE_SERVICE_NAME, stub)
def init_urlfetch_stub(self, enable=True):
"""Enable the urlfetch stub.
The urlfetch service stub uses the urllib module to make
requests. Because on the appserver urllib itself relies on the urlfetch
infrastructure, using this stub will have no effect.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(URLFETCH_SERVICE_NAME)
return
urlmatchers_to_fetch_functions = []
urlmatchers_to_fetch_functions.extend(
GCS_URLMATCHERS_TO_FETCH_FUNCTIONS)
stub = urlfetch_stub.URLFetchServiceStub(
urlmatchers_to_fetch_functions=urlmatchers_to_fetch_functions)
self._register_stub(URLFETCH_SERVICE_NAME, stub)
def init_user_stub(self, enable=True, **stub_kw_args):
"""Enable the users stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
stub_kw_args: Keyword arguments passed on to the service stub.
"""
if not enable:
self._disable_stub(USER_SERVICE_NAME)
return
stub = user_service_stub.UserServiceStub(**stub_kw_args)
self._register_stub(USER_SERVICE_NAME, stub)
def init_xmpp_stub(self, enable=True):
"""Enable the xmpp stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(XMPP_SERVICE_NAME)
return
stub = xmpp_service_stub.XmppServiceStub()
self._register_stub(XMPP_SERVICE_NAME, stub)
def init_search_stub(self, enable=True):
"""Enable the search stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(SEARCH_SERVICE_NAME)
return
if simple_search_stub is None:
raise StubNotSupportedError('Could not initialize search API')
stub = simple_search_stub.SearchServiceStub()
self._register_stub(SEARCH_SERVICE_NAME, stub)
def init_modules_stub(self, enable=True):
"""Enable the modules stub.
Args:
enable: True, if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(MODULES_SERVICE_NAME)
return
stub = modules_stub.ModulesServiceStub(request_info._LocalRequestInfo())
self._register_stub(MODULES_SERVICE_NAME, stub)
def _init_stub(self, service_name, *args, **kwargs):
"""Enable a stub by service name.
Args:
service_name: Name of service to initialize. This name should be the
name used by the service stub.
Additional arguments are passed along to the specific stub initializer.
Raises:
NotActivatedError: When this function is called before testbed is
activated or after it is deactivated.
StubNotSupportedError: When an unsupported service_name is provided.
"""
if not self._activated:
raise NotActivatedError('The testbed is not activated.')
method_name = INIT_STUB_METHOD_NAMES.get(service_name, None)
if method_name is None:
msg = 'The "%s" service is not supported by testbed' % service_name
raise StubNotSupportedError(msg)
method = getattr(self, method_name)
method(*args, **kwargs)
def init_all_stubs(self, enable=True):
"""Enable all known testbed stubs.
Args:
enable: True, if the fake services should be enabled, False if real
services should be disabled.
"""
for service_name in SUPPORTED_SERVICES:
self._init_stub(service_name, enable)
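# A minimal usage sketch (illustrative only): it shows enabling stubs,
# disabling a real service with enable=False, and restoring everything
# afterwards, following the pattern described in the module docstring above.
if __name__ == '__main__':
    tb = Testbed()
    tb.activate()
    tb.init_memcache_stub()
    tb.init_urlfetch_stub(enable=False)  # urlfetch calls are now disabled
    try:
        pass  # exercise application code against the stubs here
    finally:
        tb.deactivate()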
|
|
#!/usr/bin/python2.4
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""compact_innodb.py [ --num_readers N ] [ --num_writers N ] \\
[ --dump_dir /path/to/save/dumps ] host:dbuser:dbpass:
A script to mysqldump and reload the InnoDB tables in a given database.
This script takes a given database and performs a mysqldump on all of
the InnoDB tables present. It then drops the innodb tables, stops
mysql, deletes the innodb datafile and logs, and then restarts mysql.
It then uses the mysql utility to reload the dumps.
The script can be limited to only doing the dump or only doing the restore.
Among other things, this is used to restore data into MySQL 5 that had
been dumped from MySQL 4.
"""
__author__ = 'chip@google.com (Chip Turner)'
import glob
import MySQLdb
import os
import re
import subprocess
import sys
import time
from gmt import command_pool
from gmt import config_helper
config_helper.Init() # here instead of __main__ because we need the
# defaults for our string defines
from gmt import compat_logging as logging
from gmt import compat_flags as flags
from gmt import dbspec_lib
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_readers", 6, "number of parallel readers (mysqldump)")
flags.DEFINE_integer("num_writers", 4, "number of parallel writers")
flags.DEFINE_string("dump_dir", "/export/hda3/tmp/sql-dumps",
"directory to store .sql.gz dumps")
flags.DEFINE_boolean("do_dump", True, "dump the data")
flags.DEFINE_boolean("do_reload", True, "reload the data")
flags.DEFINE_boolean("tolerate_lossy_fp", False,
"tolerate loss of precision on reload of FP types")
flags.DEFINE_string("mysql_bin_dir", '', "path to MySQL binaries")
flags.DEFINE_boolean("wipe_innodb", True,
"drop tables, restart MySQL")
flags.DEFINE_string("user", config_helper.GetGlobal("user"),
"User to connect to the databases as",
short_name="u")
flags.DEFINE_string("password", config_helper.GetGlobal("password"),
"Password to use when connecting as user",
short_name="p")
flags.DEFINE_string("my_cnf_location", "/etc/my.cnf",
"where my.cnf is stored")
flags.DEFINE_string("mysql_root", "/var/lib/mysql",
"location of mysql's root, aka, where it stores data")
DUMP_RE = re.compile(r'(\w+)-(\w+)\.sql\.gz')
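# DumpTables() below writes each table to "<dump_dir>/<db>-<table>.sql.gz", so
# a file named "mydb-mytable.sql.gz" matches DUMP_RE with groups
# ('mydb', 'mytable').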
def _CheckReplicationConfigured(dbspec):
"""
Test whether replication is enabled on this slave.
Replication may be broken; this checks if it is configured.
Args:
dbspec - database to check
Returns:
boolean indicating if replication is enabled and running
"""
rows = dbspec.execute("SHOW SLAVE STATUS")
if len(rows) == 0:
return False
if rows[0][9] == 'No' and rows[0][10] == 'No':
return False
return True
def DumpTables(dbspec, num_readers, dump_dir, mysql_bin_dir, lossy_fp):
dbs = dbspec.getDatabases()
db_info = {}
for db in dbs:
db_info[db] = {}
for table in dbspec.getTables(db):
db_info[db][table.name] = table
todo = []
for db in db_info:
for table in db_info[db].values():
if table.table_type == 'InnoDB':
todo.append((db, table))
logging.info("Found %d tables to compact" % len(todo))
# sort descending, first by size then by name
todo.sort(lambda a,b: cmp(b[1].size, a[1].size) or cmp(b[1].name, a[1].name))
pool = command_pool.CommandPool(num_readers)
for db, table in todo:
# we use PIPESTATUS to get the return of mysqldump, not gzip.
# also, --lossless-fp is an extension to our mysqldump that allows
# for truly lossless dumps and reloads. this indirectly ensures
# we have the proper mysql in place for the dump, as the flag to
# mysqldump also requires a mysqld change that is accompanied in
# the same RPMs.
if len(mysql_bin_dir):
cmdpath = "%s/mysqldump" % mysql_bin_dir
else:
cmdpath = "mysqldump"
if lossy_fp:
lossy = ''
else:
lossy = '--lossless_fp'
cmd = (("%s --opt -v -u%s -p%s -h%s %s --database %s --tables %s | "
"gzip --fast > %s/%s-%s.sql.gz; exit ${PIPESTATUS[0]}") %
(cmdpath, dbspec.user, dbspec.password,
dbspec.host, lossy, db, table.name, dump_dir, db, table.name))
pool.submit(cmd, (db, table))
if not pool.run():
for failure in pool.failures:
logging.error("Dump of %s.%s failed: %s" %
(failure.data[0], failure.data[1], failure.returncode))
logging.error("Output: %s\n" % failure.output.read())
logging.fatal("One or more dumps failed; aborting.")
def GetTableNamesFromDumpFiles(dump_dir):
logging.info("Get table names from dump files in %s" % dump_dir)
dump_files = glob.glob(os.path.join(dump_dir, '*.sql.gz'))
todo = []
for pathname in dump_files:
filename = pathname[len(dump_dir) + 1:]
logging.debug('Found filename %s' % filename)
match_object = DUMP_RE.match(filename)
if not match_object or len(match_object.groups()) != 2:
logging.fatal('Match failed for %s' % filename)
db = match_object.groups()[0]
table = match_object.groups()[1]
logging.info('Found %s.%s' % (db, table))
todo.append((db, table))
return todo
def ReloadTables(dbspec, num_writers, dump_dir, mysql_bin_dir, wipe_innodb):
todo = GetTableNamesFromDumpFiles(dump_dir)
if wipe_innodb:
logging.info("Dropping innodb tables...")
for db, table in todo:
logging.debug('Drop %s.%s' % (db, table))
dbspec.execute("DROP TABLE %s.%s" % (db, table))
logging.info("Stopping mysql...")
dbspec.stopMySql()
logging.info("Deleting innodb data files...")
retcode = subprocess.call("sudo rm -f %s/innodb_data*; "
"sudo rm -f %s/innodb_logs/*"
% (FLAGS.mysql_root, FLAGS.mysql_root),
shell=True)
if retcode != 0:
logging.fatal("Error deleting innodb datafiles")
logging.info("Starting mysql...")
dbspec.startMySql()
time.sleep(5)
logging.info("Restoring tables...")
pool = command_pool.CommandPool(num_writers)
if len(mysql_bin_dir):
cmdpath = "%s/mysql" % mysql_bin_dir
else:
cmdpath = "mysql"
for db, table in todo:
cmd = (("zcat %s/%s-%s.sql.gz | %s -u%s -p%s -h%s -A %s") %
(dump_dir, db, table, cmdpath,
dbspec.user, dbspec.password, dbspec.host, db,
))
logging.debug('Submit %s' % cmd)
pool.submit(cmd, (db, table))
if not pool.run():
for failure in pool.failures:
logging.error("Restore of %s.%s failed: %s" %
(failure.data[0], failure.data[1], failure.returncode))
logging.error("Output: %s\n" % failure.output.read())
logging.fatal("One or more restores failed; aborting.")
def main(argv):
if len(argv) != 2:
flags.ShowUsage()
sys.exit(1)
dbspec = dbspec_lib.DatabaseSpec(argv[1], FLAGS.user, FLAGS.password)
if not os.path.exists(FLAGS.dump_dir):
logging.fatal("Specified --dump_dir, %s, does not exist" % FLAGS.dump_dir)
if not os.path.isdir(FLAGS.dump_dir):
logging.fatal("Specified --dump_dir, %s, is not a directory" %
FLAGS.dump_dir)
if os.listdir(FLAGS.dump_dir) and FLAGS.do_dump:
logging.fatal("Specified --dump_dir, %s, is not empty" % FLAGS.dump_dir)
if _CheckReplicationConfigured(dbspec):
logging.fatal("Replication must not be configured during a compaction")
# ensure we are running a mysql that properly lets us dump and
# restore floating point numbers
rows = dbspec.execute("SELECT IEEE754_TO_STRING(1.000)", failOnError=0)
if not rows:
if not FLAGS.tolerate_lossy_fp:
logging.fatal("Your version of mysql does not support precise "
"IEEE754 string representations. Dump aborted.")
else:
logging.error("Your version of mysql does not support precise "
"IEEE754 string representations.")
else:
logging.info("Your mysql supports IEEE754_TO_STRING, dump proceeding")
if FLAGS.do_dump:
DumpTables(dbspec, FLAGS.num_readers, FLAGS.dump_dir, FLAGS.mysql_bin_dir,
FLAGS.tolerate_lossy_fp)
if FLAGS.do_reload:
ReloadTables(dbspec, FLAGS.num_writers, FLAGS.dump_dir, FLAGS.mysql_bin_dir,
FLAGS.wipe_innodb)
logging.info("Done!")
if __name__ == "__main__":
new_argv = flags.ParseArgs(sys.argv[1:])
main([sys.argv[0]] + new_argv)
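# Example invocation (hypothetical host and credentials), following the usage
# string at the top of this file:
#   compact_innodb.py --num_readers 4 --num_writers 2 \
#       --dump_dir /export/hda3/tmp/sql-dumps dbhost01:dbuser:dbpass: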
|
|
"""
sentry.utils.models
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import hashlib
import logging
from django.db import models, router, transaction, IntegrityError
from django.db.models import signals
from django.db.models.expressions import ExpressionNode
from django.utils.encoding import smart_str
from sentry.utils.compat import pickle
from sentry.utils.db import resolve_expression_node
from sentry.utils.strings import decompress, compress
logger = logging.getLogger(__name__)
class QueryError(Exception):
pass
def merge_account(from_user, to_user):
# TODO: we could discover relations automatically and make this useful
from sentry.models import (GroupBookmark, Project, ProjectKey, Team, TeamMember,
UserOption)
for obj in ProjectKey.objects.filter(user=from_user):
obj.update(user=to_user)
for obj in TeamMember.objects.filter(user=from_user):
obj.update(user=to_user)
for obj in Project.objects.filter(owner=from_user):
obj.update(owner=to_user)
for obj in Team.objects.filter(owner=from_user):
obj.update(owner=to_user)
for obj in GroupBookmark.objects.filter(user=from_user):
obj.update(user=to_user)
for obj in UserOption.objects.filter(user=from_user):
obj.update(user=to_user)
def update(self, using=None, **kwargs):
"""
Updates specified attributes on the current instance.
"""
assert self.pk, "Cannot update an instance that has not yet been created."
using = using or router.db_for_write(self.__class__, instance=self)
for field in self._meta.fields:
if getattr(field, 'auto_now', False) and field.name not in kwargs:
kwargs[field.name] = field.pre_save(self, False)
affected = self.__class__._base_manager.using(using).filter(pk=self.pk).update(**kwargs)
for k, v in kwargs.iteritems():
if isinstance(v, ExpressionNode):
v = resolve_expression_node(self, v)
setattr(self, k, v)
if affected == 1:
signals.post_save.send(sender=self.__class__, instance=self, created=False)
return True
elif affected == 0:
return False
elif affected < 0:
raise ValueError("Somehow we have updated a negative number of rows; you seem to have a problem with your db backend.")
else:
raise ValueError("Somehow we have updated multiple rows, and you are now royally fucked.")
update.alters_data = True
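# A hypothetical example of the update() helper above: issue a single UPDATE
# against the row for this instance and mirror the new values locally.
#
#   instance.update(times_seen=F('times_seen') + 1)
#
# Expression values (such as F() objects) are resolved via
# resolve_expression_node(), so the in-memory attribute ends up holding a
# concrete value rather than the expression itself.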
def __prep_value(model, key, value):
if isinstance(value, models.Model):
value = value.pk
else:
value = unicode(value)
return value
def __prep_key(model, key):
if key == 'pk':
return model._meta.pk.name
return key
def make_key(model, prefix, kwargs):
kwargs_bits = []
for k, v in sorted(kwargs.iteritems()):
k = __prep_key(model, k)
v = smart_str(__prep_value(model, k, v))
kwargs_bits.append('%s=%s' % (k, v))
kwargs_bits = ':'.join(kwargs_bits)
return '%s:%s:%s' % (prefix, model.__name__, hashlib.md5(kwargs_bits).hexdigest())
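# For illustration (hypothetical model): make_key(MyModel, 'cache', {'pk': 1})
# resolves 'pk' to the primary key column and produces a key of the form
# 'cache:MyModel:<md5 of "id=1">'.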
def create_or_update(model, using=None, **kwargs):
"""
Similar to get_or_create, either updates a row or creates it.
The result will be (rows affected, False) if an existing row was updated,
or (instance, True) if a new instance was created.
>>> create_or_update(MyModel, key='value', defaults={
...     'value': F('value') + 1,
... })
"""
defaults = kwargs.pop('defaults', {})
if not using:
using = router.db_for_write(model)
objects = model.objects.using(using)
affected = objects.filter(**kwargs).update(**defaults)
if affected:
return affected, False
create_kwargs = kwargs.copy()
inst = objects.model()
for k, v in defaults.iteritems():
if isinstance(v, ExpressionNode):
create_kwargs[k] = resolve_expression_node(inst, v)
else:
create_kwargs[k] = v
try:
return objects.create(**create_kwargs), True
except IntegrityError:
transaction.rollback_unless_managed(using=using)
affected = objects.filter(**kwargs).update(**defaults)
if not affected:
raise QueryError('No rows updated or created for kwargs: %r' % kwargs)
return affected, False
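# The IntegrityError branch above covers the race where another transaction
# inserts the row between our UPDATE (which matched nothing) and our INSERT;
# after rolling back we retry the UPDATE, which should now match the row the
# other transaction created.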
class BoundedAutoField(models.AutoField):
MAX_VALUE = 2147483647
def get_prep_value(self, value):
if value:
value = int(value)
assert value <= self.MAX_VALUE
return super(BoundedAutoField, self).get_prep_value(value)
def south_field_triple(self):
"Returns a suitable description of this field for South."
from south.modelsinspector import introspector
field_class = "django.db.models.fields.AutoField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
class BoundedIntegerField(models.IntegerField):
MAX_VALUE = 2147483647
def get_prep_value(self, value):
if value:
value = int(value)
assert value <= self.MAX_VALUE
return super(BoundedIntegerField, self).get_prep_value(value)
def south_field_triple(self):
"Returns a suitable description of this field for South."
from south.modelsinspector import introspector
field_class = "django.db.models.fields.IntegerField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
class BoundedBigIntegerField(models.BigIntegerField):
MAX_VALUE = 9223372036854775807
def get_prep_value(self, value):
if value:
value = int(value)
assert value <= self.MAX_VALUE
return super(BoundedBigIntegerField, self).get_prep_value(value)
def south_field_triple(self):
"Returns a suitable description of this field for South."
from south.modelsinspector import introspector
field_class = "django.db.models.fields.BigIntegerField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
class BoundedPositiveIntegerField(models.PositiveIntegerField):
MAX_VALUE = 2147483647
def get_prep_value(self, value):
if value:
value = int(value)
assert value <= self.MAX_VALUE
return super(BoundedPositiveIntegerField, self).get_prep_value(value)
def south_field_triple(self):
"Returns a suitable description of this field for South."
from south.modelsinspector import introspector
field_class = "django.db.models.fields.PositiveIntegerField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
class GzippedDictField(models.TextField):
"""
Slightly different from a JSONField in the sense that the default
value is a dictionary.
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if isinstance(value, basestring) and value:
try:
value = pickle.loads(decompress(value))
except Exception, e:
logger.exception(e)
return {}
elif not value:
return {}
return value
def get_prep_value(self, value):
if not value and self.null:
# save ourselves some storage
return None
return compress(pickle.dumps(value))
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
def south_field_triple(self):
"Returns a suitable description of this field for South."
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
class Model(models.Model):
id = BoundedAutoField(primary_key=True)
class Meta:
abstract = True
update = update
__UNSAVED = object()
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self._update_tracked_data()
def __getstate__(self):
d = self.__dict__.copy()
# we can't serialize weakrefs
d.pop('_Model__data', None)
return d
def __reduce__(self):
(model_unpickle, stuff, _) = super(Model, self).__reduce__()
return (model_unpickle, stuff, self.__getstate__())
def __setstate__(self, state):
self.__dict__.update(state)
self._update_tracked_data()
def __get_field_value(self, field):
if isinstance(field, models.ForeignKey):
return getattr(self, field.column)
return getattr(self, field.name)
def _update_tracked_data(self):
"Updates a local copy of attribute values"
if self.id:
self.__data = dict((f.column, self.__get_field_value(f)) for f in self._meta.fields)
else:
self.__data = self.__UNSAVED
def has_changed(self, field_name):
"Returns ``True`` if ``field_name`` has changed since initialization."
if self.__data is self.__UNSAVED:
return False
field = self._meta.get_field(field_name)
return self.__data.get(field_name) != self.__get_field_value(field)
def old_value(self, field_name):
"Returns the previous value of ``field_name``"
if self.__data is self.__UNSAVED:
return None
return self.__data.get(field_name)
def __model_post_save(instance, **kwargs):
if not isinstance(instance, Model):
return
instance._update_tracked_data()
signals.post_save.connect(__model_post_save)
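# A hypothetical sketch of the change-tracking helpers above, assuming a
# concrete subclass MyModel of this base Model:
#
#   obj = MyModel.objects.get(pk=1)
#   obj.name = 'new value'
#   obj.has_changed('name')   # True, compared against the snapshot taken by
#                             # _update_tracked_data() when the row was loaded
#   obj.old_value('name')     # the value 'name' had at load time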
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright 2015 IBM Corp.
import copy
from unittest import mock
import testscenarios
from heatclient import exc
from heatclient.osc.v1 import event
from heatclient.tests.unit.osc.v1 import fakes
from heatclient.v1 import events
load_tests = testscenarios.load_tests_apply_scenarios
class TestEvent(fakes.TestOrchestrationv1):
def setUp(self):
super(TestEvent, self).setUp()
self.mock_client = self.app.client_manager.orchestration
self.event_client = self.app.client_manager.orchestration.events
self.stack_client = self.app.client_manager.orchestration.stacks
self.resource_client = self.app.client_manager.orchestration.resources
class TestEventShow(TestEvent):
scenarios = [
('table', dict(format='table')),
('shell', dict(format='shell')),
('value', dict(format='value')),
]
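# testscenarios (wired up via load_tests_apply_scenarios above) runs every
# test in this class once per scenario, with self.format set to 'table',
# 'shell', or 'value' respectively.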
response = {
'event': {
"resource_name": "my_resource",
"event_time": "2015-11-11T15:23:47Z",
"links": [],
"logical_resource_id": "my_resource",
"resource_status": "CREATE_FAILED",
"resource_status_reason": "NotFound",
"physical_resource_id": "null",
"id": "474bfdf0-a450-46ec-a78a-0c7faa404073"
}
}
def setUp(self):
super(TestEventShow, self).setUp()
self.cmd = event.ShowEvent(self.app, None)
def test_event_show(self):
arglist = ['--format', self.format, 'my_stack', 'my_resource', '1234']
parsed_args = self.check_parser(self.cmd, arglist, [])
self.event_client.get.return_value = events.Event(None, self.response)
self.cmd.take_action(parsed_args)
self.event_client.get.assert_called_with(**{
'stack_id': 'my_stack',
'resource_name': 'my_resource',
'event_id': '1234'
})
def _test_not_found(self, error):
arglist = ['my_stack', 'my_resource', '1234']
parsed_args = self.check_parser(self.cmd, arglist, [])
ex = self.assertRaises(exc.CommandError, self.cmd.take_action,
parsed_args)
self.assertIn(error, str(ex))
def test_event_show_stack_not_found(self):
error = 'Stack not found'
self.stack_client.get.side_effect = exc.HTTPNotFound(error)
self._test_not_found(error)
def test_event_show_resource_not_found(self):
error = 'Resource not found'
self.stack_client.get.side_effect = exc.HTTPNotFound(error)
self._test_not_found(error)
def test_event_show_event_not_found(self):
error = 'Event not found'
self.stack_client.get.side_effect = exc.HTTPNotFound(error)
self._test_not_found(error)
class TestEventList(TestEvent):
defaults = {
'stack_id': 'my_stack',
'resource_name': None,
'filters': {},
'sort_dir': 'asc'
}
fields = ['resource_name', 'id', 'resource_status',
'resource_status_reason', 'event_time', 'physical_resource_id',
'logical_resource_id']
class MockEvent(object):
data = {
'event_time': '2015-11-13T10:02:17',
'id': '1234',
'logical_resource_id': 'resource1',
'physical_resource_id': '',
'resource_name': 'resource1',
'resource_status': 'CREATE_COMPLETE',
'resource_status_reason': 'state changed',
'stack_name': 'my_stack',
}
def __getattr__(self, key):
try:
return self.data[key]
except KeyError:
# hasattr() in python 3 expects an AttributeError to be raised
raise AttributeError
def setUp(self):
super(TestEventList, self).setUp()
self.cmd = event.ListEvent(self.app, None)
self.event = self.MockEvent()
self.event_client.list.return_value = [self.event]
self.resource_client.list.return_value = {}
def test_event_list_defaults(self):
arglist = ['my_stack', '--format', 'table']
parsed_args = self.check_parser(self.cmd, arglist, [])
columns, data = self.cmd.take_action(parsed_args)
self.event_client.list.assert_called_with(**self.defaults)
self.assertEqual(self.fields, columns)
def test_event_list_resource_nested_depth(self):
arglist = ['my_stack', '--resource', 'my_resource',
'--nested-depth', '3', '--format', 'table']
parsed_args = self.check_parser(self.cmd, arglist, [])
self.assertRaises(exc.CommandError, self.cmd.take_action, parsed_args)
def test_event_list_logical_resource_id(self):
arglist = ['my_stack', '--format', 'table']
del self.event.data['resource_name']
cols = copy.deepcopy(self.fields)
cols.pop()
cols[0] = 'logical_resource_id'
parsed_args = self.check_parser(self.cmd, arglist, [])
columns, data = self.cmd.take_action(parsed_args)
self.event_client.list.assert_called_with(**self.defaults)
self.assertEqual(cols, columns)
self.event.data['resource_name'] = 'resource1'
def test_event_list_nested_depth(self):
arglist = ['my_stack', '--nested-depth', '3', '--format', 'table']
kwargs = copy.deepcopy(self.defaults)
kwargs['nested_depth'] = 3
cols = copy.deepcopy(self.fields)
cols[-1] = 'stack_name'
cols.append('logical_resource_id')
parsed_args = self.check_parser(self.cmd, arglist, [])
columns, data = self.cmd.take_action(parsed_args)
self.event_client.list.assert_has_calls([
mock.call(**kwargs),
mock.call(**self.defaults)
])
self.assertEqual(cols, columns)
@mock.patch('osc_lib.utils.sort_items')
def test_event_list_sort(self, mock_sort_items):
arglist = ['my_stack', '--sort', 'resource_name:desc',
'--format', 'table']
parsed_args = self.check_parser(self.cmd, arglist, [])
mock_event = self.MockEvent()
mock_sort_items.return_value = [mock_event]
columns, data = self.cmd.take_action(parsed_args)
mock_sort_items.assert_called_with(mock.ANY,
"resource_name:desc")
self.event_client.list.assert_called_with(
filters={}, resource_name=None, sort_dir='desc',
sort_keys=['resource_name'], stack_id='my_stack')
self.assertEqual(self.fields, columns)
@mock.patch('osc_lib.utils.sort_items')
def test_event_list_sort_multiple(self, mock_sort_items):
arglist = ['my_stack', '--sort', 'resource_name:desc',
'--sort', 'id:asc', '--format', 'table']
parsed_args = self.check_parser(self.cmd, arglist, [])
mock_event = self.MockEvent()
mock_sort_items.return_value = [mock_event]
columns, data = self.cmd.take_action(parsed_args)
mock_sort_items.assert_called_with(mock.ANY,
"resource_name:desc,id:asc")
self.event_client.list.assert_called_with(
filters={}, resource_name=None, sort_dir='desc',
sort_keys=['resource_name', 'id'], stack_id='my_stack')
self.assertEqual(self.fields, columns)
@mock.patch('osc_lib.utils.sort_items')
def test_event_list_sort_default_key(self, mock_sort_items):
arglist = ['my_stack', '--sort', ':desc',
'--format', 'table']
parsed_args = self.check_parser(self.cmd, arglist, [])
mock_event = self.MockEvent()
mock_sort_items.return_value = [mock_event]
columns, data = self.cmd.take_action(parsed_args)
mock_sort_items.assert_called_with(mock.ANY, "event_time:desc")
self.event_client.list.assert_called_with(
filters={}, resource_name=None, sort_dir='desc', sort_keys=[],
stack_id='my_stack')
self.assertEqual(self.fields, columns)
@mock.patch('time.sleep')
def test_event_list_follow(self, sleep):
sleep.side_effect = [None, KeyboardInterrupt()]
arglist = ['--follow', 'my_stack']
expected = (
'2015-11-13 10:02:17 [resource1]: '
'CREATE_COMPLETE state changed\n'
'2015-11-13 10:02:17 [resource1]: '
'CREATE_COMPLETE state changed\n'
)
parsed_args = self.check_parser(self.cmd, arglist, [])
columns, data = self.cmd.take_action(parsed_args)
defaults_with_marker = dict(self.defaults)
defaults_with_marker['marker'] = '1234'
self.event_client.list.assert_has_calls([
mock.call(**self.defaults),
mock.call(**defaults_with_marker)
])
self.assertEqual([], columns)
self.assertEqual([], data)
self.assertEqual(expected, self.fake_stdout.make_string())
def test_event_list_log_format(self):
arglist = ['my_stack']
expected = ('2015-11-13 10:02:17 [resource1]: CREATE_COMPLETE '
'state changed\n')
parsed_args = self.check_parser(self.cmd, arglist, [])
self.cmd.run(parsed_args)
self.event_client.list.assert_called_with(**self.defaults)
self.assertEqual(expected, self.fake_stdout.make_string())
|
|
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import wraps
import json
import os
import re
from six.moves import urllib
import pyrax
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
# The hard-coded maximum number of messages returned in a single call.
MSG_LIMIT = 10
# Pattern for extracting the marker value from an href link.
marker_pat = re.compile(r".+\bmarker=(\d+).*")
def _parse_marker(body):
marker = None
links = body.get("links", [])
next_links = [link for link in links if link.get("rel") == "next"]
try:
next_link = next_links[0]["href"]
except IndexError:
next_link = ""
mtch = marker_pat.match(next_link)
if mtch:
marker = mtch.groups()[0]
return marker
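# For example, a response body whose "links" contain a (hypothetical) entry
#   {"rel": "next", "href": ".../messages?marker=52&limit=10"}
# yields the marker value "52"; bodies without a "next" link yield None.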
def assure_queue(fnc):
"""
Converts a queue ID or name passed as the 'queue' parameter to a Queue
object.
"""
@wraps(fnc)
def _wrapped(self, queue, *args, **kwargs):
if not isinstance(queue, Queue):
# Must be the ID
queue = self._manager.get(queue)
return fnc(self, queue, *args, **kwargs)
return _wrapped
class BaseQueueManager(BaseManager):
"""
This class handles the common deviations from the API conventions that the
regular base manager classes assume.
"""
def _list(self, uri, obj_class=None, body=None, return_raw=False,
other_keys=None):
try:
return super(BaseQueueManager, self)._list(uri, obj_class=obj_class,
body=body, return_raw=return_raw, other_keys=other_keys)
except (exc.NotFound, AttributeError):
return []
class Queue(BaseResource):
"""
This class represents a Queue.
"""
def __init__(self, manager, info, key=None, loaded=False):
# Queues are often returned with no info
info = info or {"queue": {}}
super(Queue, self).__init__(manager, info, key=key, loaded=loaded)
self._repr_properties = ["id"]
self._message_manager = QueueMessageManager(self.manager.api,
resource_class=QueueMessage, response_key="",
plural_response_key="messages",
uri_base="queues/%s/messages" % self.id)
self._claim_manager = QueueClaimManager(self.manager.api,
resource_class=QueueClaim, response_key="",
plural_response_key="claims",
uri_base="queues/%s/claims" % self.id)
self._claim_manager._message_manager = self._message_manager
def get_message(self, msg_id):
"""
Returns the message whose ID matches the supplied msg_id from this
queue.
"""
return self._message_manager.get(msg_id)
def delete_message(self, msg_id, claim_id=None):
"""
Deletes the message whose ID matches the supplied msg_id from the
specified queue. If the message has been claimed, the ID of that claim
must be passed as the 'claim_id' parameter.
"""
return self._message_manager.delete(msg_id, claim_id=claim_id)
def list(self, include_claimed=False, echo=False, marker=None, limit=None):
"""
Returns a list of messages for this queue.
By default only unclaimed messages are returned; if you want claimed
messages included, pass `include_claimed=True`. Also, the requester's
own messages are not returned by default; if you want them included,
pass `echo=True`.
The 'marker' and 'limit' parameters are used to control pagination of
results. 'Marker' is the ID of the last message returned, while 'limit'
controls the number of messages returned per request (default=20).
"""
return self._message_manager.list(include_claimed=include_claimed,
echo=echo, marker=marker, limit=limit)
def list_by_ids(self, ids):
"""
If you wish to retrieve a list of messages from this queue and know the
IDs of those messages, you can pass in a list of those IDs, and only
the matching messages will be returned. This avoids pulling down all
the messages in a queue and filtering on the client side.
"""
return self._message_manager.list_by_ids(ids)
def delete_by_ids(self, ids):
"""
Deletes the messages whose IDs are passed in from this queue.
"""
return self._message_manager.delete_by_ids(ids)
def list_by_claim(self, claim):
"""
Returns a list of all the messages from this queue that have been
claimed by the specified claim. The claim can be either a claim ID or a
QueueClaim object.
"""
if not isinstance(claim, QueueClaim):
claim = self._claim_manager.get(claim)
return claim.messages
def post_message(self, body, ttl):
"""
Create a message in this queue. The value of ttl must be between 60 and
1209600 seconds (14 days).
"""
return self._message_manager.create(body, ttl)
def claim_messages(self, ttl, grace, count=None):
"""
Claims up to `count` unclaimed messages from this queue. If count is
not specified, the default is to claim 10 messages.
The `ttl` parameter specifies how long the server should wait before
releasing the claim. The ttl value MUST be between 60 and 43200 seconds.
The `grace` parameter is the message grace period in seconds. The value
of grace MUST be between 60 and 43200 seconds. The server extends the
lifetime of claimed messages to be at least as long as the lifetime of
the claim itself, plus a specified grace period to deal with crashed
workers (up to 1209600 or 14 days including claim lifetime). If a
claimed message would normally live longer than the grace period, its
expiration will not be adjusted.
Returns a QueueClaim object, whose 'messages' attribute contains the
list of QueueMessage objects representing the claimed messages.
"""
return self._claim_manager.claim(ttl, grace, count=count)
def get_claim(self, claim):
"""
Returns a QueueClaim object with information about the specified claim.
If no such claim exists, a NotFound exception is raised.
"""
return self._claim_manager.get(claim)
def update_claim(self, claim, ttl=None, grace=None):
"""
Updates the specified claim with either a new TTL or grace period, or
both.
"""
return self._claim_manager.update(claim, ttl=ttl, grace=grace)
def release_claim(self, claim):
"""
Releases the specified claim and makes any messages previously claimed
by this claim available for processing by other workers.
"""
return self._claim_manager.delete(claim)
@property
def id(self):
return self.name
@id.setter
def id(self, val):
self.name = val
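# A hypothetical sketch of the Queue message/claim API defined above, assuming
# "queue" is a Queue instance obtained from the queues client:
#
#   queue.post_message({"task": "resize"}, ttl=300)
#   claim = queue.claim_messages(ttl=60, grace=60, count=2)
#   for msg in claim.messages:
#       handle(msg.body)                        # handle() is hypothetical
#       queue.delete_message(msg.id, claim_id=claim.id)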
class QueueMessage(BaseResource):
"""
This class represents a Message posted to a Queue.
"""
def __init__(self, *args, **kwargs):
self.id = None
self.age = None
self.body = None
self.href = None
self.ttl = None
self.claim_id = None
super(QueueMessage, self).__init__(*args, **kwargs)
def _add_details(self, info):
"""
The 'id' and 'claim_id' attributes are not supplied directly, but
included as part of the 'href' value.
"""
super(QueueMessage, self)._add_details(info)
if self.href is None:
return
parsed = urllib.parse.urlparse(self.href)
self.id = parsed.path.rsplit("/", 1)[-1]
query = parsed.query
if query:
self.claim_id = query.split("claim_id=")[-1]
def delete(self, claim_id=None):
"""
Deletes this message from its queue. If the message has been claimed,
the ID of that claim must be passed as the 'claim_id' parameter.
"""
return self.manager.delete(self, claim_id=claim_id)
class QueueClaim(BaseResource):
"""
This class represents a Claim for a Message posted by a consumer.
"""
id = None
messages = None
href = ""
def _add_details(self, info):
"""
The 'id' attribute is not supplied directly, but included as part of
the 'href' value. Also, convert the dicts for messages into
QueueMessage objects.
"""
msg_dicts = info.pop("messages", [])
super(QueueClaim, self)._add_details(info)
parsed = urllib.parse.urlparse(self.href)
self.id = parsed.path.rsplit("/", 1)[-1]
self.messages = [QueueMessage(self.manager._message_manager, item)
for item in msg_dicts]
class QueueMessageManager(BaseQueueManager):
"""
Manager class for a Queue Message.
"""
def _create_body(self, msg, ttl):
"""
Used to create the dict required to create a new message.
"""
body = [{
"body": msg,
"ttl": ttl,
}]
return body
def list(self, include_claimed=False, echo=False, marker=None, limit=None):
"""
Need to form the URI differently, so we can't use the default list().
"""
return self._iterate_list(include_claimed=include_claimed, echo=echo,
marker=marker, limit=limit)
def _iterate_list(self, include_claimed, echo, marker, limit):
"""
Recursive method to work around the hard limit of 10 items per call.
"""
ret = []
if limit is None:
this_limit = MSG_LIMIT
else:
this_limit = min(MSG_LIMIT, limit)
limit = limit - this_limit
uri = "/%s?include_claimed=%s&echo=%s" % (self.uri_base,
json.dumps(include_claimed), json.dumps(echo))
qs_parts = []
if marker is not None:
qs_parts.append("marker=%s" % marker)
if this_limit is not None:
qs_parts.append("limit=%s" % this_limit)
if qs_parts:
uri = "%s&%s" % (uri, "&".join(qs_parts))
resp, resp_body = self._list(uri, return_raw=True)
if not resp_body:
return ret
messages = resp_body.get(self.plural_response_key, [])
ret = [QueueMessage(manager=self, info=item) for item in messages]
marker = _parse_marker(resp_body)
loop = 0
if ((limit is None) or limit > 0) and marker:
loop += 1
ret.extend(self._iterate_list(include_claimed, echo, marker, limit))
return ret
def delete(self, msg, claim_id=None):
"""
Deletes the specified message from its queue. If the message has been
claimed, the ID of that claim must be passed as the 'claim_id'
parameter.
"""
msg_id = utils.get_id(msg)
if claim_id:
uri = "/%s/%s?claim_id=%s" % (self.uri_base, msg_id, claim_id)
else:
uri = "/%s/%s" % (self.uri_base, msg_id)
return self._delete(uri)
def list_by_ids(self, ids):
"""
If you wish to retrieve a list of messages from this queue and know the
IDs of those messages, you can pass in a list of those IDs, and only
the matching messages will be returned. This avoids pulling down all
the messages in a queue and filtering on the client side.
"""
ids = utils.coerce_to_list(ids)
uri = "/%s?ids=%s" % (self.uri_base, ",".join(ids))
# The API is not consistent in how it returns message lists (it returns a
# bare list rather than a dict keyed by the plural response key), so this
# workaround is needed.
curr_prkey = self.plural_response_key
self.plural_response_key = ""
ret = self._list(uri)
self.plural_response_key = curr_prkey
return ret
def delete_by_ids(self, ids):
"""
Deletes the messages whose IDs are passed in from this queue.
"""
ids = utils.coerce_to_list(ids)
uri = "/%s?ids=%s" % (self.uri_base, ",".join(ids))
return self.api.method_delete(uri)
class QueueClaimManager(BaseQueueManager):
"""
Manager class for Queue Claims.
"""
def claim(self, ttl, grace, count=None):
"""
Claims up to `count` unclaimed messages from this queue. If count is
not specified, the default is to claim 10 messages.
The `ttl` parameter specifies how long the server should wait before
releasing the claim. The ttl value MUST be between 60 and 43200 seconds.
The `grace` parameter is the message grace period in seconds. The value
of grace MUST be between 60 and 43200 seconds. The server extends the
lifetime of claimed messages to be at least as long as the lifetime of
the claim itself, plus a specified grace period to deal with crashed
workers (up to 1209600 or 14 days including claim lifetime). If a
claimed message would normally live longer than the grace period, its
expiration will not be adjusted.
Returns a QueueClaim object, whose 'messages' attribute contains the
list of QueueMessage objects representing the claimed messages.
"""
if count is None:
qs = ""
else:
qs = "?limit=%s" % count
uri = "/%s%s" % (self.uri_base, qs)
body = {"ttl": ttl,
"grace": grace,
}
resp, resp_body = self.api.method_post(uri, body=body)
if resp.status_code == 204:
# Nothing available to claim
return None
# Get the claim ID from the first message in the list.
href = resp_body[0]["href"]
claim_id = href.split("claim_id=")[-1]
return self.get(claim_id)
def update(self, claim, ttl=None, grace=None):
"""
Updates the specified claim with either a new TTL or grace period, or
both.
"""
body = {}
if ttl is not None:
body["ttl"] = ttl
if grace is not None:
body["grace"] = grace
if not body:
raise exc.MissingClaimParameters("You must supply a value for "
"'ttl' or 'grace' when calling 'update()'")
uri = "/%s/%s" % (self.uri_base, utils.get_id(claim))
resp, resp_body = self.api.method_patch(uri, body=body)
class QueueManager(BaseQueueManager):
"""
Manager class for a Queue.
"""
def _create_body(self, name, metadata=None):
"""
Used to create the dict required to create a new queue
"""
if metadata is None:
body = {}
else:
body = {"metadata": metadata}
return body
def get(self, id_):
"""
Need to customize, since Queues are not returned with normal response
bodies.
"""
if self.api.queue_exists(id_):
return Queue(self, {"queue": {"name": id_, "id_": id_}}, key="queue")
raise exc.NotFound("The queue '%s' does not exist." % id_)
def create(self, name):
uri = "/%s/%s" % (self.uri_base, name)
resp, resp_body = self.api.method_put(uri)
if resp.status_code == 201:
return Queue(self, {"name": name})
elif resp.status_code == 400:
# Most likely an invalid name
raise exc.InvalidQueueName("Queue names must not exceed 64 bytes "
"in length, and are limited to US-ASCII letters, digits, "
"underscores, and hyphens. Submitted: '%s'." % name)
def get_stats(self, queue):
"""
Returns the message stats for the specified queue.
"""
uri = "/%s/%s/stats" % (self.uri_base, utils.get_id(queue))
resp, resp_body = self.api.method_get(uri)
return resp_body.get("messages")
def get_metadata(self, queue):
"""
Returns the metadata for the specified queue.
"""
uri = "/%s/%s/metadata" % (self.uri_base, utils.get_id(queue))
resp, resp_body = self.api.method_get(uri)
return resp_body
def set_metadata(self, queue, metadata, clear=False):
"""
Accepts a dictionary and adds that to the specified queue's metadata.
If the 'clear' argument is passed as True, any existing metadata is
replaced with the new metadata.
"""
uri = "/%s/%s/metadata" % (self.uri_base, utils.get_id(queue))
if clear:
curr = {}
else:
curr = self.get_metadata(queue)
curr.update(metadata)
resp, resp_body = self.api.method_put(uri, body=curr)
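# Illustrative sketch (assumption, not original code): set_metadata() merges
# the supplied dict into the queue's existing metadata unless clear=True, in
# which case the existing metadata is replaced outright.
#
#     queue_manager.set_metadata(queue, {"owner": "billing"})             # merge
#     queue_manager.set_metadata(queue, {"owner": "billing"}, clear=True) # replace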
class QueueClient(BaseClient):
"""
This is the primary class for interacting with Cloud Queues.
"""
name = "Cloud Queues"
client_id = None
def _configure_manager(self):
"""
Create the manager to handle queues.
"""
self._manager = QueueManager(self,
resource_class=Queue, response_key="queue",
uri_base="queues")
def _add_custom_headers(self, dct):
"""
Add the Client-ID header required by Cloud Queues
"""
if self.client_id is None:
self.client_id = os.environ.get("CLOUD_QUEUES_ID")
if self.client_id:
dct["Client-ID"] = self.client_id
def _api_request(self, uri, method, **kwargs):
"""
Any request that involves messages must define the client ID. This
handles all failures due to lack of client ID and raises the
appropriate exception.
"""
try:
return super(QueueClient, self)._api_request(uri, method, **kwargs)
except exc.BadRequest as e:
if ((e.code == "400") and
(e.message == 'The "Client-ID" header is required.')):
raise exc.QueueClientIDNotDefined("You must supply a client ID "
"to work with Queue messages.")
else:
raise
def get_home_document(self):
"""
You should never need to use this method; it is included for
completeness. It is meant to be used for API clients that need to
explore the API with no prior knowledge. This knowledge is already
included in the SDK, so it should never be necessary to work at such a
basic level, as all the functionality is exposed through normal
Python methods in the client.
If you are curious about the 'Home Document' concept, here is the
explanation from the Cloud Queues documentation:
The entire API is discoverable from a single starting point - the home
document. You do not need to know any more than this one URI in order
to explore the entire API. This document is cacheable.
The home document lets you write clients using a "follow-your-nose"
style so clients do not have to construct their own URLs. You can click
through and view the JSON doc in your browser.
For more information about home documents, see
http://tools.ietf.org/html/draft-nottingham-json-home-02.
"""
uri = self.management_url.rsplit("/", 1)[0]
return self.method_get(uri)
def queue_exists(self, name):
"""
Returns True or False, depending on the existence of the named queue.
"""
try:
queue = self._manager.head(name)
return True
except exc.NotFound:
return False
def create(self, name):
"""
Cloud Queues works differently, in that it uses the queue name as the ID
for the resource. So for create(), we need to check if a queue by that name
exists already, and raise an exception if it does. If not, create the
queue and return a reference object for it.
"""
if self.queue_exists(name):
raise exc.DuplicateQueue("The queue '%s' already exists." % name)
return self._manager.create(name)
def get_stats(self, queue):
"""
Returns the message stats for the specified queue.
"""
return self._manager.get_stats(queue)
def get_metadata(self, queue):
"""
Returns the metadata for the specified queue.
"""
return self._manager.get_metadata(queue)
def set_metadata(self, queue, metadata, clear=False):
"""
Accepts a dictionary and adds that to the specified queue's metadata.
If the 'clear' argument is passed as True, any existing metadata is
replaced with the new metadata.
"""
return self._manager.set_metadata(queue, metadata, clear=clear)
@assure_queue
def get_message(self, queue, msg_id):
"""
Returns the message whose ID matches the supplied msg_id from the
specified queue.
"""
return queue.get_message(msg_id)
@assure_queue
def delete_message(self, queue, msg_id, claim_id=None):
"""
Deletes the message whose ID matches the supplied msg_id from the
specified queue. If the message has been claimed, the ID of that claim
must be passed as the 'claim_id' parameter.
"""
return queue.delete_message(msg_id, claim_id=claim_id)
@assure_queue
def list_messages(self, queue, include_claimed=False, echo=False,
marker=None, limit=None):
"""
Returns a list of messages for the specified queue.
By default only unclaimed messages are returned; if you want claimed
messages included, pass `include_claimed=True`. Also, the requester's
own messages are not returned by default; if you want them included,
pass `echo=True`.
The 'marker' and 'limit' parameters are used to control pagination of
results. 'Marker' is the ID of the last message returned, while 'limit'
controls the number of messages returned per request (default=20).
"""
return queue.list(include_claimed=include_claimed, echo=echo,
marker=marker, limit=limit)
@assure_queue
def list_messages_by_ids(self, queue, ids):
"""
If you wish to retrieve a list of messages from a queue and know the
IDs of those messages, you can pass in a list of those IDs, and only
the matching messages will be returned. This avoids pulling down all
the messages in a queue and filtering on the client side.
"""
return queue.list_by_ids(ids)
@assure_queue
def delete_messages_by_ids(self, queue, ids):
"""
Deletes the messages whose IDs are passed in from the specified queue.
"""
return queue.delete_by_ids(ids)
@assure_queue
def list_messages_by_claim(self, queue, claim):
"""
Returns a list of all the messages from the specified queue that have
been claimed by the specified claim. The claim can be either a claim ID
or a QueueClaim object.
"""
return queue.list_by_claim(claim)
@assure_queue
def post_message(self, queue, body, ttl):
"""
Create a message in the specified queue. The value of ttl must be
between 60 and 1209600 seconds (14 days).
"""
return queue.post_message(body, ttl)
@assure_queue
def claim_messages(self, queue, ttl, grace, count=None):
"""
Claims up to `count` unclaimed messages from the specified queue. If
count is not specified, the default is to claim 10 messages.
The `ttl` parameter specifies how long the server should wait before
releasing the claim. The ttl value MUST be between 60 and 43200 seconds.
The `grace` parameter is the message grace period in seconds. The value
of grace MUST be between 60 and 43200 seconds. The server extends the
lifetime of claimed messages to be at least as long as the lifetime of
the claim itself, plus a specified grace period to deal with crashed
workers (up to 1209600 or 14 days including claim lifetime). If a
claimed message would normally live longer than the grace period, its
expiration will not be adjusted.
Returns a QueueClaim object, whose 'messages' attribute contains the
list of QueueMessage objects representing the claimed messages.
"""
return queue.claim_messages(ttl, grace, count=count)
@assure_queue
def get_claim(self, queue, claim):
"""
Returns a QueueClaim object with information about the specified claim.
If no such claim exists, a NotFound exception is raised.
"""
return queue.get_claim(claim)
@assure_queue
def update_claim(self, queue, claim, ttl=None, grace=None):
"""
Updates the specified claim with either a new TTL or grace period, or
both.
"""
return queue.update_claim(claim, ttl=ttl, grace=grace)
@assure_queue
def release_claim(self, queue, claim):
"""
Releases the specified claim and makes any messages previously claimed
by this claim available for processing by other workers.
"""
return queue.release_claim(claim)
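# Illustrative end-to-end sketch, not part of the original module. It assumes
# an already-authenticated QueueClient instance named `client`; the queue name
# and message body are placeholders.
#
#     client.client_id = "worker-42"       # or set the CLOUD_QUEUES_ID env var
#     if client.queue_exists("jobs"):
#         queue = client.get("jobs")
#     else:
#         queue = client.create("jobs")
#     client.post_message(queue, {"task": "resize", "id": 1}, ttl=3600)
#     claim = client.claim_messages(queue, ttl=300, grace=60, count=10)
#     stats = client.get_stats(queue)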
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Another difference is that 'v2' ResNets do not include an activation function in
the main pathway. Also see [2; Fig. 4e].
Typical use:
from tensorflow.contrib.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope(is_training)):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
slim = tf.contrib.slim
resnet_arg_scope = resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
outputs_collections=None, scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
normalizer_fn=None, activation_fn=None,
scope='shortcut')
residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
normalizer_fn=None, activation_fn=None,
scope='conv3')
output = shortcut + residual
return slim.utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether is training or not.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling; if False, exclude it. If excluded, `inputs` should be the
results of an activation-less convolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.name + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with slim.arg_scope([slim.conv2d],
activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
if num_classes is not None:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
# Convert end_points_collection into a dictionary of end_points.
end_points = dict(tf.get_collection(end_points_collection))
if num_classes is not None:
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(2048, 512, 1)] * 3)]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, reuse=reuse, scope=scope)
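# Illustrative sketch (assumption, mirroring the module docstring): running one
# of the builders above in fully-convolutional mode for dense prediction.
# `images` is a placeholder tensor of shape [batch, 513, 513, 3].
#
#     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
#         net, end_points = resnet_v2_101(images,
#                                         num_classes=21,
#                                         is_training=False,
#                                         global_pool=False,
#                                         output_stride=16)
#     # net then has spatial shape [(513 - 1) / 16 + 1, (513 - 1) / 16 + 1],
#     # i.e. [33, 33].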
|
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.helpers.helpers import wait
from devops.error import TimeoutError
from proboscis.asserts import assert_equal
from proboscis import SkipTest
from proboscis import test
from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers.decorators import retry
from fuelweb_test.helpers import os_actions
from fuelweb_test.tests import base_test_case
@test(groups=["thread_5", "ha"])
class TestNeutronFailover(base_test_case.TestBasic):
@classmethod
def get_node_with_dhcp(cls, self, os_conn, net_id):
node = os_conn.get_node_with_dhcp_for_network(net_id)[0]
node_fqdn = self.fuel_web.get_fqdn_by_hostname(node)
logger.debug('node name with dhcp is {0}'.format(node))
devops_node = self.fuel_web.find_devops_node_by_nailgun_fqdn(
node_fqdn, self.env.nodes().slaves[0:6])
return devops_node
@classmethod
def get_node_with_l3(cls, self, node_with_l3):
node_with_l3_fqdn = self.fuel_web.get_fqdn_by_hostname(node_with_l3)
logger.debug("new node with l3 is {0}".format(node_with_l3))
devops_node = self.fuel_web.find_devops_node_by_nailgun_fqdn(
node_with_l3_fqdn, self.env.nodes().slaves[0:6])
return devops_node
@classmethod
def create_instance_with_keypair(cls, os_conn, remote):
remote.execute(
'. openrc;'
' nova keypair-add instancekey > /root/.ssh/webserver_rsa')
remote.execute('chmod 400 /root/.ssh/webserver_rsa')
instance = os_conn.create_server_for_migration(
neutron=True, key_name='instancekey')
return instance
@classmethod
def reshedule_router_manually(cls, os_conn, router_id):
l3_agent_id = os_conn.get_l3_agent_ids(router_id)[0]
logger.debug("l3 agent id is {0}".format(l3_agent_id))
another_l3_agent = os_conn.get_available_l3_agents_ids(
l3_agent_id)[0]
logger.debug("another l3 agent is {0}".format(another_l3_agent))
os_conn.remove_l3_from_router(l3_agent_id, router_id)
os_conn.add_l3_to_router(another_l3_agent, router_id)
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60 * 5)
@classmethod
def check_instance_connectivity(cls, remote, dhcp_namespace, instance_ip):
cmd = ". openrc; ip netns exec {0} ssh -i /root/.ssh/webserver_rsa" \
" -o 'StrictHostKeyChecking no'" \
" cirros@{1} \"ping -c 1 8.8.8.8\"".format(dhcp_namespace,
instance_ip)
wait(lambda: remote.execute(cmd)['exit_code'] == 0, timeout=2 * 60)
res = remote.execute(cmd)
assert_equal(0, res['exit_code'],
'instance has no connectivity, exit code {0}'.format(
res['exit_code']))
@test(depends_on=[base_test_case.SetupEnvironment.prepare_release],
groups=["deploy_ha_neutron"])
@log_snapshot_on_error
def deploy_ha_neutron(self):
"""Deploy cluster in HA mode, Neutron with GRE segmentation
Scenario:
1. Create cluster. HA, Neutron with GRE segmentation
2. Add 3 nodes with controller roles
3. Add 2 nodes with compute roles
4. Add 1 node with cinder role
5. Deploy the cluster
Duration 90m
Snapshot deploy_ha_neutron
"""
try:
self.check_run('deploy_ha_neutron')
except SkipTest:
return
self.env.revert_snapshot("ready")
self.env.bootstrap_nodes(self.env.nodes().slaves[:6])
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=settings.DEPLOYMENT_MODE,
settings={
"net_provider": 'neutron',
"net_segment_type": 'gre'
}
)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['compute'],
'slave-05': ['compute'],
'slave-06': ['cinder']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.env.make_snapshot("deploy_ha_neutron", is_make=True)
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_l3_migration"])
@log_snapshot_on_error
def neutron_l3_migration(self):
"""Check l3-agent rescheduling after l3-agent dies
Scenario:
1. Revert snapshot with neutron cluster
2. Manually reschedule router from primary controller
to another one
3. Stop l3-agent on new node with pcs
4. Check l3-agent was rescheduled
5. Check network connectivity from instance via
dhcp namespace
6. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
logger.debug('dhcp namespace is {0}'.format(dhcp_namespace))
instance_ip = \
self.create_instance_with_keypair(
os_conn, remote).addresses['net04'][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
router_id = os_conn.get_routers_ids()[0]
self.reshedule_router_manually(os_conn, router_id)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0]
new_devops = self.get_node_with_l3(self, node_with_l3)
new_remote = self.env.get_ssh_to_remote_by_name(new_devops.name)
new_remote.execute("pcs resource ban p_neutron-l3-agent {0}".format(
node_with_l3))
try:
wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts(
router_id)[0], timeout=60 * 3)
except TimeoutError:
raise TimeoutError(
"l3 agent wasn't banned, it is still {0}".format(
os_conn.get_l3_agent_hosts(router_id)[0]))
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
new_remote.execute("pcs resource clear p_neutron-l3-agent {0}".
format(node_with_l3))
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_l3_migration_after_reset"])
@log_snapshot_on_error
def neutron_l3_migration_after_reset(self):
"""Check l3-agent rescheduling after reset non-primary controller
Scenario:
1. Revert snapshot with neutron cluster
2. Manually reschedule router from primary controller
to another one
3. Reset controller with l3-agent
4. Check l3-agent was rescheduled
5. Check network connectivity from instance via
dhcp namespace
6. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
logger.debug('dhcp namespace is {0}'.format(dhcp_namespace))
instance_ip = \
self.create_instance_with_keypair(
os_conn, remote).addresses['net04'][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
router_id = os_conn.get_routers_ids()[0]
self.reshedule_router_manually(os_conn, router_id)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0]
new_devops = self.get_node_with_l3(self, node_with_l3)
self.fuel_web.warm_restart_nodes([new_devops])
try:
wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts(
router_id)[0], timeout=60 * 3)
except TimeoutError:
raise TimeoutError(
"l3 agent wasn't rescheduled, it is still {0}".format(
os_conn.get_l3_agent_hosts(router_id)[0]))
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_l3_migration_after_destroy"])
@log_snapshot_on_error
def neutron_l3_migration_after_destroy(self):
"""Check l3-agent rescheduling after destroy non-primary controller
Scenario:
1. Revert snapshot with neutron cluster
2. Manually reschedule router from primary controller
to another one
3. Destroy controller with l3-agent
4. Check l3-agent was rescheduled
5. Check network connectivity from instance via
dhcp namespace
6. Run OSTF
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
net_id = os_conn.get_network('net04')['id']
devops_node = self.get_node_with_dhcp(self, os_conn, net_id)
remote = self.env.get_ssh_to_remote_by_name(devops_node.name)
dhcp_namespace = ''.join(remote.execute('ip netns | grep {0}'.format(
net_id))['stdout']).rstrip()
logger.debug('dhcp namespace is {0}'.format(dhcp_namespace))
instance_ip = \
self.create_instance_with_keypair(
os_conn, remote).addresses['net04'][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
router_id = os_conn.get_routers_ids()[0]
self.reshedule_router_manually(os_conn, router_id)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
node_with_l3 = os_conn.get_l3_agent_hosts(router_id)[0]
new_devops = self.get_node_with_l3(self, node_with_l3)
new_devops.destroy()
wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
new_devops)['online'], timeout=60 * 10)
self.fuel_web.wait_mysql_galera_is_up(
[n.name for n in
set(self.env.nodes().slaves[:3]) - {new_devops}])
try:
wait(lambda: not node_with_l3 == os_conn.get_l3_agent_hosts(
router_id)[0], timeout=60 * 3)
except TimeoutError:
raise TimeoutError(
"l3 agent wasn't rescheduled, it is still {0}".format(
os_conn.get_l3_agent_hosts(router_id)[0]))
wait(lambda: os_conn.get_l3_agent_ids(router_id), timeout=60)
self.check_instance_connectivity(remote, dhcp_namespace, instance_ip)
@retry(count=3, delay=120)
def run_single_test(cluster_id):
self.fuel_web.run_single_ostf_test(
cluster_id, test_sets=['smoke'],
test_name='fuel_health.tests.smoke.'
'test_neutron_actions.TestNeutron.'
'test_check_neutron_objects_creation')
run_single_test(cluster_id)
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'],
should_fail=1,
failed_test_name=['Check that required services are running'])
@test(depends_on=[deploy_ha_neutron],
groups=["neutron_packets_drops_stat"])
@log_snapshot_on_error
def neutron_packets_drop_stat(self):
"""Check packets drops statistic when size is equal to MTU
Scenario:
1. Revert snapshot with neutron cluster
2. Create instance, assign floating IP to it
3. Send ICMP packets from controller to instance with 1500 bytes
4. If at least 7 responses to 10 requests are received,
assume the test passed
Duration 30m
"""
self.env.revert_snapshot("deploy_ha_neutron")
cluster_id = self.fuel_web.get_last_created_cluster()
os_conn = os_actions.OpenStackActions(
self.fuel_web.get_public_vip(cluster_id))
instance = os_conn.create_server_for_migration(neutron=True)
floating_ip = os_conn.assign_floating_ip(instance)
logger.debug("instance floating ip is {0}".format(floating_ip.ip))
remote = self.env.get_ssh_to_remote_by_name('slave-01')
mtu_cmd = r"cat /sys/class/net/$(ip r g {0} |" \
r" sed -rn" \
r" 's/.*dev\s+(\S+)\s.*/\1/p')/mtu".format(floating_ip.ip)
mtu = ''.join(remote.execute(mtu_cmd)['stdout'])
logger.debug('mtu is equal to {0}'.format(mtu))
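# 28 bytes = 20-byte IPv4 header + 8-byte ICMP header, so the payload size
# below makes each echo request exactly MTU-sized without fragmentation.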
cmd = "ping -q -s {0} -c 7 -w 10 {1}".format(int(mtu) - 28,
floating_ip.ip)
res = remote.execute(cmd)
assert_equal(0, res['exit_code'],
'most packets were dropped, result is {0}'.format(res))
|
|
#!/usr/bin/env python
#############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015 Jason Pruitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#############################################################################
import sys
import threading
from time import sleep
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from demo_ui import Ui_MainWindow
from fgpio import GPIO
from fgpio.boards.nanopi import Config
class Demo(QMainWindow, Ui_MainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setGeometry(QApplication.desktop().availableGeometry())
self.setFixedSize(320, 240)
self.setupUi(self)
self._init_events()
self._g = GPIO(Config())
self._init_buttons()
self._init_leds()
self._pin_servo = 26
self._servo_dc = 50
self._pin_dimmer = 22
self._dimmer_dc = 50
self._g.pwm_init(self._pin_servo, 20000000, 1500000)
self._g.pwm_init(self._pin_dimmer, 1000000, 500000)
def _exit(self):
self._close_servo()
self._close_dimmer()
self._close_leds()
self._close_buttons()
exit()
def _init_events(self):
self.tab_demo.currentChanged.connect(self._tab_changer)
self.btn_exit.clicked.connect(self._exit)
self.dial_servo.valueChanged.connect(self._servo_dial)
self.dial_dimmer.valueChanged.connect(self._dimmer_dial)
self.rbox_1.clicked.connect(self._rbox1_handler)
self.rbox_2.clicked.connect(self._rbox2_handler)
self.rbox_3.clicked.connect(self._rbox3_handler)
def _tab_changer(self, index):
if index == 0:
pass
elif index == 1:
self._g.pwm_stop(self._pin_servo)
self._init_dimmer()
self.dial_dimmer.setValue(self._dimmer_dc)
elif index == 2:
self._g.pwm_stop(self._pin_dimmer)
# PWM periods and duty cycles are given in nanoseconds (cf. the pwm_init
# calls in __init__).
self._init_servo()
self.dial_servo.setValue(self._servo_dc)
def _init_leds(self):
self._toggling = False
self._l1 = 11
self._l2 = 13
self._g.gpio_init(self._l1, 'out')
self._g.gpio_init(self._l2, 'out')
self._g.gpio_write(self._l1, 1)
def _rbox1_handler(self):
self._toggling = False
self._g.gpio_write(self._l2, 0)
self._g.gpio_write(self._l1, 1)
def _rbox2_handler(self):
self._toggling = False
self._g.gpio_write(self._l1, 0)
self._g.gpio_write(self._l2, 1)
def _rbox3_handler(self):
self._toggling = True
r3 = threading.Thread(target=self._led_toggler)
r3.daemon = True
r3.start()
def _led_toggler(self):
led = False
while self._toggling:
led = not led
if led:
self._g.gpio_write(self._l1, 0)
self._g.gpio_write(self._l2, 1)
else:
self._g.gpio_write(self._l2, 0)
self._g.gpio_write(self._l1, 1)
sleep(1)
def _close_leds(self):
self._toggling = False
sleep(.2)
self._g.gpio_close(self._l1)
self._g.gpio_close(self._l2)
def _init_buttons(self):
self._button_running = True
b1 = threading.Thread(target=self._button_press, args=(29, self.lbl_btn_1))
b1.daemon = True
b1.start()
b2 = threading.Thread(target=self._button_press, args=(15, self.lbl_btn_2))
b2.daemon = True
b2.start()
def _button_press(self, pin, btn):
self._g.eint_init(pin, 'high')
btn_on = False
while self._button_running:
if self._g.eint_event(pin):
self._g.eint_clear(pin)
if not btn_on:
btn.setText('ON')
btn_on = True
else:
if btn_on:
btn.setText('OFF')
btn_on = False
sleep(.05)
self._g.eint_close(pin)
def _close_buttons(self):
self._button_running = False
sleep(.2)
def _init_dimmer(self):
self.dial_dimmer.setEnabled(True)
self._g.pwm_period(self._pin_dimmer, 1000000)
self._g.pwm_start(self._pin_dimmer)
def _close_dimmer(self):
self.dial_dimmer.setEnabled(False)
try:
self._g.pwm_close(self._pin_dimmer)
except:
pass
def _dimmer_dial(self, value):
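# Maps the 0-100 dial value to a 0-1,000,000 ns duty cycle over the 1 ms
# period configured in _init_dimmer(), i.e. 0-100% brightness.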
base_dc = 0
max_dc = 1000000
new_dc = base_dc + (value*10000)
if new_dc > max_dc:
new_dc = max_dc
self._dimmer_dc = value
self._g.pwm_duty_cycle(self._pin_dimmer, new_dc)
def _init_servo(self):
self.dial_servo.setEnabled(True)
self._g.pwm_period(self._pin_servo, 20000000)
self._g.pwm_start(self._pin_servo)
def _close_servo(self):
self.dial_servo.setEnabled(False)
try:
self._g.pwm_close(self._pin_servo)
except:
pass
def _servo_dial(self, value):
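# Maps the 0-100 dial value to a 1.0-2.0 ms pulse (1,000,000-2,000,000 ns)
# within the 20 ms period configured in _init_servo(), the conventional
# hobby-servo control range.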
base_dc = 1000000
max_dc = 2000000
new_dc = base_dc + (value*(base_dc/100))
if new_dc > max_dc:
new_dc = max_dc
self._servo_dc = value
self._g.pwm_duty_cycle(self._pin_servo, new_dc)
if __name__ == '__main__':
app = QApplication(sys.argv)
d = Demo()
d.show()
sys.exit(app.exec_())
|
|
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
import tempfile
import time
from os_win.utils import pathutils
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
from nova.virt.hyperv import constants
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
ERROR_INVALID_NAME = 123
# NOTE(claudiub): part of the pre-existing PathUtils is nova-specific and
# it does not belong in the os-win library. In order to ensure the same
# functionality with the least amount of changes necessary, adding as a mixin
# the os_win.pathutils.PathUtils class into this PathUtils.
class PathUtils(pathutils.PathUtils):
def get_instances_dir(self, remote_server=None):
local_instance_path = os.path.normpath(CONF.instances_path)
if remote_server and not local_instance_path.startswith(r'\\'):
if CONF.hyperv.instances_path_share:
path = CONF.hyperv.instances_path_share
else:
# Use an administrative share
path = local_instance_path.replace(':', '$')
return ('\\\\%(remote_server)s\\%(path)s' %
{'remote_server': remote_server, 'path': path})
else:
return local_instance_path
def _get_instances_sub_dir(self, dir_name, remote_server=None,
create_dir=True, remove_dir=False):
instances_path = self.get_instances_dir(remote_server)
path = os.path.join(instances_path, dir_name)
try:
if remove_dir:
self.check_remove_dir(path)
if create_dir:
self.check_create_dir(path)
return path
except WindowsError as ex:
if ex.winerror == ERROR_INVALID_NAME:
raise exception.AdminRequired(_(
"Cannot access \"%(instances_path)s\", make sure the "
"path exists and that you have the proper permissions. "
"In particular Nova-Compute must not be executed with the "
"builtin SYSTEM account or other accounts unable to "
"authenticate on a remote host.") %
{'instances_path': instances_path})
raise
def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
remove_dir=False):
dir_name = '%s_revert' % instance_name
return self._get_instances_sub_dir(dir_name, None, create_dir,
remove_dir)
def get_instance_dir(self, instance_name, remote_server=None,
create_dir=True, remove_dir=False):
return self._get_instances_sub_dir(instance_name, remote_server,
create_dir, remove_dir)
def _lookup_vhd_path(self, instance_name, vhd_path_func,
*args, **kwargs):
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = vhd_path_func(instance_name, format_ext,
*args, **kwargs)
if self.exists(test_path):
vhd_path = test_path
break
return vhd_path
def lookup_root_vhd_path(self, instance_name, rescue=False):
return self._lookup_vhd_path(instance_name, self.get_root_vhd_path,
rescue)
def lookup_configdrive_path(self, instance_name, rescue=False):
configdrive_path = None
for format_ext in constants.DISK_FORMAT_MAP:
test_path = self.get_configdrive_path(instance_name, format_ext,
rescue=rescue)
if self.exists(test_path):
configdrive_path = test_path
break
return configdrive_path
def lookup_ephemeral_vhd_path(self, instance_name, eph_name):
return self._lookup_vhd_path(instance_name,
self.get_ephemeral_vhd_path,
eph_name)
def get_root_vhd_path(self, instance_name, format_ext, rescue=False):
instance_path = self.get_instance_dir(instance_name)
image_name = 'root'
if rescue:
image_name += '-rescue'
return os.path.join(instance_path,
image_name + '.' + format_ext.lower())
def get_configdrive_path(self, instance_name, format_ext,
remote_server=None, rescue=False):
instance_path = self.get_instance_dir(instance_name, remote_server)
configdrive_image_name = 'configdrive'
if rescue:
configdrive_image_name += '-rescue'
return os.path.join(instance_path,
configdrive_image_name + '.' + format_ext.lower())
def get_ephemeral_vhd_path(self, instance_name, format_ext, eph_name):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, eph_name + '.' + format_ext.lower())
def get_base_vhd_dir(self):
return self._get_instances_sub_dir('_base')
def get_export_dir(self, instance_name):
dir_name = os.path.join('export', instance_name)
return self._get_instances_sub_dir(dir_name, create_dir=True,
remove_dir=True)
def get_vm_console_log_paths(self, instance_name, remote_server=None):
instance_dir = self.get_instance_dir(instance_name,
remote_server)
console_log_path = os.path.join(instance_dir, 'console.log')
return console_log_path, console_log_path + '.1'
def copy_vm_console_logs(self, instance_name, dest_host):
local_log_paths = self.get_vm_console_log_paths(
instance_name)
remote_log_paths = self.get_vm_console_log_paths(
instance_name, remote_server=dest_host)
for local_log_path, remote_log_path in zip(local_log_paths,
remote_log_paths):
if self.exists(local_log_path):
self.copy(local_log_path, remote_log_path)
def get_image_path(self, image_name):
# Note: it is possible that the path doesn't exist
base_dir = self.get_base_vhd_dir()
for ext in ['vhd', 'vhdx']:
file_path = os.path.join(base_dir,
image_name + '.' + ext.lower())
if self.exists(file_path):
return file_path
return None
def get_age_of_file(self, file_name):
return time.time() - os.path.getmtime(file_name)
def check_dirs_shared_storage(self, src_dir, dest_dir):
# Check if shared storage is being used by creating a temporary
# file at the destination path and checking if it exists at the
# source path.
LOG.debug("Checking if %(src_dir)s and %(dest_dir)s point "
"to the same location.",
dict(src_dir=src_dir, dest_dir=dest_dir))
try:
with tempfile.NamedTemporaryFile(dir=dest_dir) as tmp_file:
src_path = os.path.join(src_dir,
os.path.basename(tmp_file.name))
shared_storage = os.path.exists(src_path)
except OSError as e:
raise exception.FileNotFound(six.text_type(e))
return shared_storage
def check_remote_instances_dir_shared(self, dest):
# Checks if the instances dir from a remote host points
# to the same storage location as the local instances dir.
local_inst_dir = self.get_instances_dir()
remote_inst_dir = self.get_instances_dir(dest)
return self.check_dirs_shared_storage(local_inst_dir,
remote_inst_dir)
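# Illustrative sketch (assumption, not original code) of the layout the helpers
# above produce, given CONF.instances_path = 'C:\\OpenStack\\Instances' and an
# instance named 'instance-00000001':
#
#     get_instances_dir()                   -> C:\OpenStack\Instances
#     get_instances_dir('host2')            -> \\host2\C$\OpenStack\Instances
#     get_instance_dir('instance-00000001') -> C:\OpenStack\Instances\instance-00000001
#     get_root_vhd_path('instance-00000001', 'vhdx')
#         -> C:\OpenStack\Instances\instance-00000001\root.vhdx
#     get_vm_console_log_paths('instance-00000001')
#         -> (...\instance-00000001\console.log, ...\instance-00000001\console.log.1)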
|
|
"""
Tests for riak contacts backend and collection.
"""
from datetime import datetime
from twisted.internet.defer import inlineCallbacks, returnValue
from zope.interface.verify import verifyObject
from vumi.tests.helpers import VumiTestCase
from vumi.tests.helpers import PersistenceHelper
from go.vumitools.contact import ContactStore, ContactNotFoundError
from go_api.collections import ICollection
from go_api.collections.errors import (
CollectionObjectNotFound, CollectionUsageError)
from go_contacts.backends.riak import (
RiakContactsBackend, RiakContactsCollection)
class TestRiakContactsBackend(VumiTestCase):
def setUp(self):
self.persistence_helper = self.add_helper(
PersistenceHelper(use_riak=True, is_sync=False))
@inlineCallbacks
def mk_backend(self):
manager = yield self.persistence_helper.get_riak_manager()
backend = RiakContactsBackend(manager, 10)
returnValue(backend)
@inlineCallbacks
def test_get_contacts_collection(self):
backend = yield self.mk_backend()
collection = backend.get_contact_collection("owner-1")
self.assertEqual(collection.contact_store.user_account_key, "owner-1")
self.assertTrue(isinstance(collection, RiakContactsCollection))
class TestRiakContactsCollection(VumiTestCase):
def setUp(self):
self.persistence_helper = self.add_helper(
PersistenceHelper(use_riak=True, is_sync=False))
@inlineCallbacks
def mk_collection(self, owner_id):
manager = yield self.persistence_helper.get_riak_manager()
contact_store = ContactStore(manager, owner_id)
collection = RiakContactsCollection(contact_store, 10)
returnValue(collection)
EXPECTED_DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
CONTACT_FIELD_DEFAULTS = {
u'$VERSION': 2,
u'bbm_pin': None,
u'dob': None,
u'email_address': None,
u'facebook_id': None,
u'groups': [],
u'gtalk_id': None,
u'mxit_id': None,
u'name': None,
u'surname': None,
u'twitter_handle': None,
u'wechat_id': None,
u'extra': {},
u'subscription': {},
}
def assert_contact(self, contact, expected_partial):
expected = self.CONTACT_FIELD_DEFAULTS.copy()
expected.update(expected_partial)
if isinstance(expected.get("created_at"), datetime):
expected["created_at"] = expected["created_at"].strftime(
self.EXPECTED_DATE_FORMAT)
self.assertEqual(contact, expected)
def test_pick_fields(self):
pick_fields = RiakContactsCollection._pick_fields
self.assertEqual(
pick_fields({"a": "1", "b": "2"}, ["a", "c"]),
{"a": "1"})
def test_pick_contact_fields(self):
pick_contact_fields = RiakContactsCollection._pick_contact_fields
self.assertEqual(
pick_contact_fields({"msisdn": "+12345", "notfield": "xyz"}),
{"msisdn": "+12345"})
def test_check_contact_fields_success(self):
check_contact_fields = RiakContactsCollection._check_contact_fields
self.assertEqual(
check_contact_fields({"msisdn": "+12345"}),
{"msisdn": "+12345"})
def test_check_contact_fields_raises(self):
check_contact_fields = RiakContactsCollection._check_contact_fields
err = self.assertRaises(
CollectionUsageError, check_contact_fields,
{"msisdn": "+12345", "notfield": "xyz"})
self.assertEqual(str(err), "Invalid contact fields: notfield")
def test_check_contact_fields_raises_multiple_fields(self):
check_contact_fields = RiakContactsCollection._check_contact_fields
err = self.assertRaises(
CollectionUsageError, check_contact_fields,
{"msisdn": "+12345", "notfield": "xyz", "badfield": "foo"})
self.assertEqual(
str(err), "Invalid contact fields: badfield, notfield")
@inlineCallbacks
def test_collection_provides_ICollection(self):
"""
The contacts collection is an object that provides ICollection.
"""
collection = yield self.mk_collection("owner-1")
verifyObject(ICollection, collection)
@inlineCallbacks
def test_init(self):
collection = yield self.mk_collection("owner-1")
self.assertEqual(collection.contact_store.user_account_key, "owner-1")
@inlineCallbacks
def test_get(self):
collection = yield self.mk_collection("owner-1")
new_contact = yield collection.contact_store.new_contact(
name=u"Bob", msisdn=u"+12345")
contact = yield collection.get(new_contact.key)
self.assert_contact(contact, {
u'key': new_contact.key,
u'created_at': new_contact.created_at,
u'msisdn': u'+12345',
u'name': u'Bob',
u'user_account': u'owner-1',
})
@inlineCallbacks
def test_get_non_existent_contact(self):
collection = yield self.mk_collection("owner-1")
d = collection.get("bad-contact-id")
err = yield self.failUnlessFailure(d, CollectionObjectNotFound)
self.assertEqual(str(err), "Contact 'bad-contact-id' not found.")
@inlineCallbacks
def test_create(self):
collection = yield self.mk_collection("owner-1")
key, contact = yield collection.create(None, {
"msisdn": u"+12345",
"name": u"Arthur",
"surname": u"of Camelot",
})
new_contact = yield collection.contact_store.get_contact_by_key(key)
self.assert_contact(contact, {
u'key': new_contact.key,
u'created_at': new_contact.created_at,
u'msisdn': u'+12345',
u'name': u'Arthur',
u'surname': u'of Camelot',
u'user_account': u'owner-1',
})
self.assertEqual(new_contact.key, key)
self.assertEqual(new_contact.name, u"Arthur")
self.assertEqual(new_contact.surname, u"of Camelot")
self.assertEqual(new_contact.msisdn, u"+12345")
@inlineCallbacks
def test_create_with_id_fails(self):
collection = yield self.mk_collection("owner-1")
d = collection.create(u"foo", {
"msisdn": u"+12345",
"name": u"Sir Gawain",
})
err = yield self.failUnlessFailure(d, CollectionUsageError)
self.assertEqual(
str(err), "A contact key may not be specified in contact creation")
@inlineCallbacks
def test_create_invalid_fields(self):
collection = yield self.mk_collection("owner-1")
d = collection.create(None, {
"unknown_field": u"foo",
"not_the_field": u"bar",
})
err = yield self.failUnlessFailure(d, CollectionUsageError)
self.assertEqual(
str(err), "Invalid contact fields: not_the_field, unknown_field")
@inlineCallbacks
def test_create_invalid_field_value(self):
collection = yield self.mk_collection("owner-1")
d = collection.create(None, {
"msisdn": 5,
})
err = yield self.failUnlessFailure(d, CollectionUsageError)
self.assertEqual(
str(err), "Value 5 is not a unicode string.")
@inlineCallbacks
def test_update(self):
collection = yield self.mk_collection("owner-1")
new_contact = yield collection.contact_store.new_contact(
name=u"Bob", msisdn=u"+12345")
contact = yield collection.update(new_contact.key, {
"msisdn": u"+6789",
})
self.assert_contact(contact, {
u'key': new_contact.key,
u'created_at': new_contact.created_at,
u'msisdn': u"+6789",
u'name': u'Bob',
u'user_account': u'owner-1',
})
@inlineCallbacks
def test_update_non_existent_contact(self):
collection = yield self.mk_collection("owner-1")
d = collection.update("bad-contact-id", {})
err = yield self.failUnlessFailure(d, CollectionObjectNotFound)
self.assertEqual(str(err), "Contact 'bad-contact-id' not found.")
@inlineCallbacks
def test_update_invalid_fields(self):
collection = yield self.mk_collection("owner-1")
new_contact = yield collection.contact_store.new_contact(
name=u"Bob", msisdn=u"+12345")
d = collection.update(new_contact.key, {
"unknown_field": u"foo",
"not_the_field": u"bar",
})
err = yield self.failUnlessFailure(d, CollectionUsageError)
self.assertEqual(
str(err), "Invalid contact fields: not_the_field, unknown_field")
@inlineCallbacks
def test_update_invalid_field_value(self):
collection = yield self.mk_collection("owner-1")
new_contact = yield collection.contact_store.new_contact(
name=u"Bob", msisdn=u"+12345")
d = collection.update(new_contact.key, {
"msisdn": None,
})
err = yield self.failUnlessFailure(d, CollectionUsageError)
self.assertEqual(
str(err), "None is not allowed as a value for non-null fields.")
@inlineCallbacks
def test_delete(self):
collection = yield self.mk_collection("owner-1")
new_contact = yield collection.contact_store.new_contact(
name=u"Bob", msisdn=u"+12345")
contact = yield collection.delete(new_contact.key)
self.assert_contact(contact, {
u'key': new_contact.key,
u'created_at': new_contact.created_at,
u'msisdn': u'+12345',
u'name': u'Bob',
u'user_account': u'owner-1',
})
d = collection.contact_store.get_contact_by_key(new_contact.key)
yield self.failUnlessFailure(d, ContactNotFoundError)
@inlineCallbacks
def test_delete_non_existent_contact(self):
collection = yield self.mk_collection("owner-1")
d = collection.delete("bad-contact-id")
err = yield self.failUnlessFailure(d, CollectionObjectNotFound)
self.assertEqual(str(err), "Contact 'bad-contact-id' not found.")
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
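# A hedged restatement of the loss computed above as a formula, where
# B = batch_size, K = num_samples and sigma is the logistic sigmoid:
#
#   loss = (1/B) * [ sum_i -log(sigma(true_logit_i))
#                    + sum_{i,k} -log(1 - sigma(sampled_logit_{i,k})) ]
#
# i.e. sigmoid cross-entropy with label 1 for the true (example, label) pair
# and label 0 for each sampled noise pair, summed and averaged over the batch.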
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vector on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
time.sleep(opts.statistics_interval)  # Reports our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError as e:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
print(c)
return
print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
|
import time
import numpy as np
import numpy.random as nr
import h5py
#from scipy import ndimage as nd
from scipy import io as sio
#from dpLoadh5 import dpLoadh5
#from dpWriteh5 import dpWriteh5
#from emdrp.utils.typesh5 import emLabels
from scipy import linalg as sla
from scipy import optimize as opt
from scipy import spatial as spt
from scipy.special import ellipkinc, ellipeinc
import vtk
from vtk.util import numpy_support as nps
mesh_in='/home/watkinspv/Downloads/K0057_soma_annotation/out/K0057-D31-somas_dsx12y12z4-clean-cut.0.mesh.h5'
points_per_area = 1e-4
doplots = True
plotsvdfit = True
plot_surf = False
opacity = 0.5
penalty = 0.
def vtkShow(mapper=None, renderer=None):
if renderer is None:
# open a window and display the data specified by mapper
# need an actor and a renderer to display data
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
#renderer.SetBackground(1.0, 1.0, 1.0)
renderer.SetBackground(0.0, 0.0, 0.0)
# optionally setup the camera (xxx - and lighting?)
#camera = renderer.MakeCamera()
#camera.SetPosition(-500.0, 245.5, 122.0)
#camera.SetFocalPoint(301.0, 245.5, 122.0)
#camera.SetViewAngle(30.0)
#camera.SetRoll(-90.0)
#renderer.SetActiveCamera(camera)
# setup the renderer window / interactor and run
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
renderInteractor.SetRenderWindow(renderWin)
renderWin.SetSize(600, 600)
renderInteractor.Initialize()
renderWin.Render()
renderInteractor.Start()
def ellipsoid_SA(r):
r = np.sort(r.reshape(-1), axis=0); a,b,c = r[2],r[1],r[0]
if np.isclose(a,b) and np.isclose(b,c):
#print('sphere')
ellipsoid_area = 4*np.pi*a**2
else:
# https://www.johndcook.com/blog/2014/07/06/ellipsoid-surface-area/
if np.isclose(a,b):
#print('oblate')
m=1
elif np.isclose(b,c):
#print('prolate')
m=0
else:
#print('triaxial')
m = (a**2 * (b**2 - c**2)) / (b**2 * (a**2 - c**2))
phi = np.arccos(c/a)
temp = ellipeinc(phi, m)*np.sin(phi)**2 + ellipkinc(phi, m)*np.cos(phi)**2
ellipsoid_area = 2*np.pi*(c**2 + a*b*temp/np.sin(phi))
return ellipsoid_area
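# Quick sanity checks (illustrative only, not part of the original script):
#   ellipsoid_SA(np.array([1., 1., 1.]))  # unit sphere: 4*pi ~ 12.566
#   ellipsoid_SA(np.array([1., 1., 2.]))  # prolate spheroid: ~ 21.48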
def ellipsoid_points(R, C, points_per_area):
# incorrect method
# theta, phi = np.mgrid[0:nsurfpts+1, 0:nsurfpts+1]/nsurfpts
# theta = theta*np.pi - np.pi/2; phi = phi*2*np.pi - np.pi
#
# x = X[0]*np.cos(theta)*np.cos(phi);
# y = X[1]*np.cos(theta)*np.sin(phi);
# z = X[2]*np.sin(theta);
# return np.vstack((x.reshape(-1), y.reshape(-1), z.reshape(-1))).T + np.array(X[3:]).reshape((1,3))
SA = ellipsoid_SA(R); nsurfpts = int(SA*points_per_area)
# for broadcast, use npts x 3, dimensions along axis 1
R = R.reshape((1,3)); C = C.reshape((1,3))
# https://math.stackexchange.com/questions/973101/how-to-generate-points-uniformly-distributed-on-the-surface-of-an-ellipsoid?answertab=oldest#tab-top
pts = nr.randn(nsurfpts,3) * R; d = np.sqrt( (pts*pts/R/R).sum(1) )[:, None]
return pts/d + C
# brute force method
# npts = 0; pts = np.zeros((nsurfpts,3),dtype=np.double)
# while npts < nsurfpts:
# cpts = nr.rand(1e6,3) * 2*(R+1) - (R+1)
# sel = (abs(np.sqrt((cpts*cpts/R/R).sum(1)) - 1) < 1e-3)
# cnpts = sel.sum(); print(cnpts)
# if npts + cnpts < nsurfpts:
# pts[npts:npts+cnpts,:] = cpts[sel,:]; npts = npts + cnpts
# else:
# pts[npts:,:] = cpts[sel,:][:nsurfpts-npts,:]; npts = nsurfpts
# return pts + C
def ellipsoid_distance(X, surf_tree, surf_pts, points_per_area, penalty=0.):
#print(X)
R = X[:3].reshape((1,3)); C = X[3:].reshape((1,3))
epts = ellipsoid_points(R,C,points_per_area)
d, i = surf_tree.query(epts)
dist1 = d.mean()
if penalty > 0:
# introduce some penalty for surface points that are inside the ellipse
sel = (((surf_pts-C)**2/R/R).sum(1) < 1)
perc_inside = sel.sum(dtype=np.double)/surf_pts.shape[0]
return dist1 + perc_inside*dist1*penalty
else:
return dist1
# get the volume and surface area data from the mesh file
h5file = h5py.File(mesh_in, 'r'); dset_root = h5file['0']
str_seed = ('%08d' % 0)
scale = dset_root[str_seed]['faces'].attrs['scale']; voxel_volume = scale.prod()
vertex_divisor = dset_root[str_seed]['faces'].attrs['vertex_divisor']
nseeds = len(dset_root)-1
# for saving fits
svd_rads = np.zeros((nseeds,3),np.double); svd_ctrs = np.zeros((nseeds,3),np.double)
min_rads = np.zeros((nseeds,3),np.double); min_ctrs = np.zeros((nseeds,3),np.double)
svd_rots = np.zeros((nseeds,3,3),np.double)
for i in range(nseeds):
#for i in range(3):
print('Processing soma %d' % (i,)); t = time.time()
str_seed = ('%08d' % (i+1,))
#soma_volumes[i] = dset_root[str_seed]['vertices'].attrs['nVoxels'] * voxel_volume
#soma_surface_areas[i] = dset_root[str_seed]['vertices'].attrs['surface_area']
vertices = np.empty_like(dset_root[str_seed]['vertices'])
faces = np.empty_like(dset_root[str_seed]['faces'])
dset_root[str_seed]['vertices'].read_direct(vertices)
dset_root[str_seed]['faces'].read_direct(faces)
nvertices = vertices.shape[0]; nfaces = faces.shape[0]
vertices = vertices.astype(np.double) / vertex_divisor
# for global coordinates add bounding box offset
vertices += dset_root[str_seed]['vertices'].attrs['bounds_beg'] / vertex_divisor
# vtk needs unstructured grid preceded with number of points in each cell
if plot_surf:
pfaces = np.hstack((3*np.ones((nfaces, 1),dtype=faces.dtype), faces))
else:
faces = np.arange(nvertices, dtype=faces.dtype)[:,None]; nfaces = nvertices
pfaces = np.hstack((1*np.ones((nfaces, 1),dtype=faces.dtype), faces))
# use vertices as points for fitting
pts = vertices.copy(); npts = pts.shape[0]
# sel = (somas[svox_bnd[j-1]] == j) # binary select within bounding box
# pts = np.transpose(np.nonzero(sel)).astype(np.double)*sampling # pts is nx3
# npts = pts.shape[0]
# svd on centered points to get principal axes.
# NOTE IMPORTANT: from scipy, svd different from matlab:
# "The SVD is commonly written as a = U S V.H. The v returned by this function is V.H and u = U."
# pts is Nx3, eigenvectors in V are along the rows
C = pts.mean(0)[:,None];
U, S, Vt = sla.svd(pts - C.T,overwrite_a=False,full_matrices=False)
# the std of the points along the eigenvectors
svd_std = np.sqrt(S**2/(npts-1))[:,None];
# rotate the points to align on cartesian axes
rpts = ((np.dot(Vt, pts.T - C) + C).T).copy(order='C')
# create kdtree to find closest points to mesh vertices
tree = spt.cKDTree(rpts)
# for optimization bounds
minrpts = rpts.min(0); maxrpts = rpts.max(0)
minR = 0.5**(1/3)*svd_std.copy(); maxR = 8**(1/3)*svd_std.copy()
bounds = ((minR[0],maxR[0]),(minR[1],maxR[1]),(minR[2],maxR[2]),
(minrpts[0],maxrpts[0]),(minrpts[1],maxrpts[1]),(minrpts[2],maxrpts[2]))
# scale ellipse by some number of stds as one method of fitting ellipsoid
svd_ctr = C; fit_dist = np.inf
for s in np.arange(1,10,0.1):
crad = s**(1/3)*svd_std
cdist = ellipsoid_distance(np.vstack((crad,svd_ctr)), tree, rpts, points_per_area, penalty)
if cdist < fit_dist:
svd_rad = crad; fit_dist = cdist
print('\tDistance %.4f with SVD rad %.4f %.4f %.4f ctr %.4f %.4f %.4f' % (fit_dist,
svd_rad[0],svd_rad[1],svd_rad[2],svd_ctr[0],svd_ctr[1],svd_ctr[2]))
svd_rads[i,:] = svd_rad.reshape(-1); svd_ctrs[i,:] = svd_ctr.reshape(-1)
svd_rots[i,:,:] = Vt
# default to svd "fits"
fit_rad = svd_rad; fit_ctr = svd_ctr;
# for testing ellipsoid_distance
#ellipsoid_distance(np.vstack((s,C)), tree, points_per_area)
## for testing ellipse points
#s = np.array((500,1000,1500),dtype=np.double); C = np.zeros((3,1), np.double)
#rpts = ellipsoid_points(s,C,points_per_area).copy(order='C'); nvertices = rpts.shape[0]
#faces = np.arange(nvertices, dtype=faces.dtype)[:,None]; nfaces = nvertices
#pfaces = np.hstack((1*np.ones((nfaces, 1),dtype=faces.dtype), faces))
# normal local minimization functions do not work, error function is not smooth at all
#X,success = opt.leastsq(ellipsoid_distance, np.vstack((minR*1.1,C-500)),
# args=(tree, points_per_area))
#print(X,success)
#res = opt.minimize(ellipsoid_distance, np.vstack((minR*1.1,C-500)),
# args=(tree, points_per_area), bounds=bounds)
#print(res)
# global minimization methods
#X = opt.brute(ellipsoid_distance, bounds, args=(tree, points_per_area))
res = opt.differential_evolution(ellipsoid_distance, bounds, args=(tree, rpts, points_per_area, penalty),
maxiter=10000, strategy='best1bin', polish=False, disp=False)
fit_rad = np.array(res.x[:3]).reshape((3,1)); fit_ctr = np.array(res.x[3:]).reshape((3,1))
fit_dist = ellipsoid_distance(np.vstack((fit_rad,fit_ctr)), tree, rpts, points_per_area, penalty)
print('\tDistance %.4f with min rad %.4f %.4f %.4f ctr %.4f %.4f %.4f' % (fit_dist,
fit_rad[0],fit_rad[1],fit_rad[2],fit_ctr[0],fit_ctr[1],fit_ctr[2]))
min_rads[i,:] = fit_rad.reshape(-1); min_ctrs[i,:] = fit_ctr.reshape(-1)
print('\tdone in %.4f s' % (time.time() - t,))
if doplots:
renderer = vtk.vtkRenderer()
allPolyData = vtk.vtkAppendPolyData()
# create a sphere in blue
sphSrc = vtk.vtkSphereSource();
sphSrc.SetCenter(0,0,0);
sphSrc.SetRadius(1.0);
sphSrc.SetThetaResolution(100)
sphSrc.SetPhiResolution(100)
translation = vtk.vtkTransform();
translation.Scale(fit_rad[0],fit_rad[1],fit_rad[2])
translation.PostMultiply()
translation.Translate(fit_ctr[0],fit_ctr[1],fit_ctr[2]);
transformFilter = vtk.vtkTransformPolyDataFilter();
transformFilter.SetInputConnection(sphSrc.GetOutputPort());
transformFilter.SetTransform(translation);
sphMapper = vtk.vtkPolyDataMapper()
sphMapper.SetInputConnection(transformFilter.GetOutputPort())
sphActor = vtk.vtkActor()
sphActor.SetMapper(sphMapper)
sphActor.GetProperty().SetColor(0,0,1)
sphActor.GetProperty().SetOpacity(opacity)
renderer.AddActor(sphActor)
if plotsvdfit:
# create a sphere in red
sphSrc2 = vtk.vtkSphereSource();
sphSrc2.SetCenter(0,0,0);
sphSrc2.SetRadius(1.0);
sphSrc2.SetThetaResolution(100)
sphSrc2.SetPhiResolution(100)
translation2 = vtk.vtkTransform();
translation2.Scale(svd_rad[0],svd_rad[1],svd_rad[2])
translation2.PostMultiply()
translation2.Translate(svd_ctr[0],svd_ctr[1],svd_ctr[2]);
transformFilter2 = vtk.vtkTransformPolyDataFilter();
transformFilter2.SetInputConnection(sphSrc2.GetOutputPort());
transformFilter2.SetTransform(translation2);
sphMapper2 = vtk.vtkPolyDataMapper()
sphMapper2.SetInputConnection(transformFilter2.GetOutputPort())
sphActor2 = vtk.vtkActor()
sphActor2.SetMapper(sphMapper2)
sphActor2.GetProperty().SetColor(1,0,0)
sphActor2.GetProperty().SetOpacity(opacity)
renderer.AddActor(sphActor2)
# create vertices for polydata
# http://www.vtk.org/Wiki/VTK/Examples/Python/GeometricObjects/Display/Polygon
points = vtk.vtkPoints(); points.SetData(nps.numpy_to_vtk(rpts))
# create cells to be used as faces or vertices
# http://stackoverflow.com/questions/20146421/how-to-convert-a-mesh-to-vtk-format/20146620#20146620
cells = vtk.vtkCellArray()
cells.SetCells(nfaces, nps.numpy_to_vtk(pfaces, array_type=vtk.vtkIdTypeArray().GetDataType()))
# set faces and vertices of polydata
polyData = vtk.vtkPolyData(); polyData.SetPoints(points)
if plot_surf:
polyData.SetPolys(cells)
else:
polyData.SetVerts(cells)
# use appendpolydata to render multiple supervoxels per object
allPolyData.AddInputData(polyData)
allMappers = vtk.vtkPolyDataMapper()
allMappers.SetInputConnection(allPolyData.GetOutputPort())
allActors = vtk.vtkActor()
allActors.SetMapper(allMappers)
allActors.GetProperty().SetColor(0,1,0)
allActors.GetProperty().SetPointSize(2)
allActors.GetProperty().SetOpacity(opacity)
renderer.AddActor(allActors)
vtkShow(renderer=renderer)
h5file.close()
mat_out='/home/watkinspv/Downloads/K0057_soma_annotation/out/somas_cut_fit_surf_penalty.mat'
sio.savemat(mat_out, {'svd_rads':svd_rads, 'svd_ctrs':svd_ctrs, 'min_rads':min_rads,
'min_ctrs':min_ctrs, 'svd_rots':svd_rots})
|
|
#! /usr/bin/env python
from MFT import MFTEnumerator
import array
import re
import logging
import datetime
import argparse
from jinja2 import Template
from BinaryParser import Mmap
from MFT import Cache
from MFT import ATTR_TYPE
from MFT import MREF
from MFT import MSEQNO
from MFT import IndexRootHeader
from MFT import Attribute
from MFT import FilenameAttribute
from MFT import StandardInformationFieldDoesNotExist
ASCII_BYTE = " !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~"
def ascii_strings(buf, n=4):
reg = "([%s]{%d,})" % (ASCII_BYTE, n)
ascii_re = re.compile(reg)
for match in ascii_re.finditer(buf):
if isinstance(match.group(), array.array):
yield match.group().tostring().decode("ascii")
else:
yield match.group().decode("ascii")
def unicode_strings(buf, n=4):
reg = b"((?:[%s]\x00){4,})" % (ASCII_BYTE)
ascii_re = re.compile(reg)
for match in ascii_re.finditer(buf):
try:
if isinstance(match.group(), array.array):
yield match.group().tostring().decode("utf-16")
else:
yield match.group().decode("utf-16")
except UnicodeDecodeError:
pass
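# Hedged illustration (not part of the original tool): both generators yield
# printable runs of at least n characters, e.g. with the default n=4
#   list(ascii_strings("abc\x00defgh\x01ij"))  # -> [u'defgh']
# unicode_strings() behaves analogously for UTF-16LE (ASCII byte, NUL byte) runs.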
def get_flags(flags):
"""
Get readable list of attribute flags.
"""
attributes = []
for flag in Attribute.FLAGS.keys():
if flags & flag:
attributes.append(Attribute.FLAGS[flag])
return attributes
def create_safe_datetime(fn):
try:
return fn()
except ValueError:
return datetime.datetime(1970, 1, 1, 0, 0, 0)
def create_safe_timeline_entry(fn, type_, source, path):
return {
"timestamp": create_safe_datetime(fn),
"type": type_,
"source": source,
"path": path,
}
def create_safe_timeline_entries(attr, source, path):
return [
create_safe_timeline_entry(attr.created_time, "birthed", source, path),
create_safe_timeline_entry(attr.accessed_time, "accessed", source, path),
create_safe_timeline_entry(attr.modified_time, "modified", source, path),
create_safe_timeline_entry(attr.changed_time, "changed", source, path),
]
def get_timeline_entries(record):
entries = []
si = record.standard_information()
fn = record.filename_information()
if si and fn:
filename = fn.filename()
entries.extend(create_safe_timeline_entries(si, "$SI", filename))
for b in record.attributes():
if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
continue
attr = FilenameAttribute(b.value(), 0, record)
attr_filename = attr.filename()
entries.extend(create_safe_timeline_entries(attr, "$FN", attr_filename))
indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
if indxroot and indxroot.non_resident() == 0:
irh = IndexRootHeader(indxroot.value(), 0, False)
for e in irh.node_header().entries():
fn = e.filename_information()
fn_filename = fn.filename()
entries.extend(create_safe_timeline_entries(fn, "INDX", fn_filename))
for e in irh.node_header().slack_entries():
fn = e.filename_information()
fn_filename = fn.filename()
entries.extend(create_safe_timeline_entries(fn, "slack-INDX", fn_filename))
return sorted(entries, key=lambda x: x["timestamp"] or datetime.datetime(1970, 1, 1, 0, 0, 0))
def make_filename_information_model(attr):
if attr is None:
return None
return {
"type": ["POSIX", "WIN32", "DOS 8.3", "WIN32 + DOS 8.3"][attr.filename_type()],
"name": attr.filename(),
"flags": get_flags(attr.flags()),
"logical_size": attr.logical_size(),
"physical_size": attr.physical_size(),
"modified": create_safe_datetime(attr.modified_time),
"accessed": create_safe_datetime(attr.accessed_time),
"changed": create_safe_datetime(attr.changed_time),
"created": create_safe_datetime(attr.created_time),
"parent_ref": MREF(attr.mft_parent_reference()),
"parent_seq": MSEQNO(attr.mft_parent_reference()),
}
def make_standard_information_model(attr):
if attr is None:
return None
# if attr is None:
# default_time = datetime.datetime(1970, 1, 1, 0, 0, 0)
# return {
# "created": default_time,
# "modified": default_time,
# "changed": default_time,
# "accessed": default_time,
# "owner_id": 0,
# "security_id": "",
# "quota_charged": 0,
# "usn": 0
# }
ret = {
"created": create_safe_datetime(attr.created_time),
"modified": create_safe_datetime(attr.modified_time),
"changed": create_safe_datetime(attr.changed_time),
"accessed": create_safe_datetime(attr.accessed_time),
"flags": get_flags(attr.attributes())
}
# since the fields are sequential, we can handle an exception halfway through here
# and then ignore the remaining items. We don't have to worry about individual try/excepts.
try:
ret["owner_id"] = attr.owner_id()
ret["security_id"] = attr.security_id()
ret["quota_charged"] = attr.quota_charged()
ret["usn"] = attr.usn()
except StandardInformationFieldDoesNotExist:
pass
return ret
def make_attribute_model(attr):
ret = {
"type": Attribute.TYPES[attr.type()],
"name": attr.name(),
"flags": get_flags(attr.flags()),
"is_resident": attr.non_resident() == 0,
"data_size": 0,
"allocated_size": 0,
"value_size": 0,
"runs": [],
}
if attr.non_resident() > 0:
ret["data_size"] = attr.data_size()
ret["allocated_size"] = attr.allocated_size()
if attr.allocated_size() > 0:
for (offset, length) in attr.runlist().runs():
ret["runs"].append({
"offset": offset,
"length": length,
})
else:
ret["value_size"] = attr.value_length()
return ret
def make_model(record, path):
active_data = record.active_data()
slack_data = record.slack_data()
model = {
"magic": record.magic(),
"path": path,
"inode": record.inode,
"is_active": record.is_active(),
"is_directory": record.is_directory(),
"size": 0, # updated below
"standard_information": make_standard_information_model(record.standard_information()),
"filename_information": make_filename_information_model(record.filename_information()),
"owner_id": 0, # updated below
"security_id": 0, # updated below
"quota_charged": 0, # updated below
"usn": 0, # updated below
"filenames": [],
"attributes": [],
"indx_entries": [],
"slack_indx_entries": [],
"timeline": get_timeline_entries(record),
"active_ascii_strings": ascii_strings(active_data),
"active_unicode_strings": unicode_strings(active_data),
"slack_ascii_strings": ascii_strings(slack_data),
"slack_unicode_strings": unicode_strings(slack_data),
}
if not record.is_directory():
data_attr = record.data_attribute()
if data_attr and data_attr.non_resident() > 0:
model["size"] = data_attr.data_size()
elif record.filename_information() is not None:
model["size"] = record.filename_information().logical_size()
else:
model["size"] = 0
for b in record.attributes():
if b.type() != ATTR_TYPE.FILENAME_INFORMATION:
continue
attr = FilenameAttribute(b.value(), 0, record)
model["filenames"].append(make_filename_information_model(attr))
for b in record.attributes():
model["attributes"].append(make_attribute_model(b))
indxroot = record.attribute(ATTR_TYPE.INDEX_ROOT)
if indxroot and indxroot.non_resident() == 0:
irh = IndexRootHeader(indxroot.value(), 0, False)
for e in irh.node_header().entries():
m = make_filename_information_model(e.filename_information())
m["inode"] = MREF(e.mft_reference())
m["sequence_num"] = MSEQNO(e.mft_reference())
model["indx_entries"].append(m)
for e in irh.node_header().slack_entries():
m = make_filename_information_model(e.filename_information())
m["inode"] = MREF(e.mft_reference())
m["sequence_num"] = MSEQNO(e.mft_reference())
model["slack_indx_entries"].append(m)
return model
def format_record(record, path):
template = Template(
"""\
MFT Record: {{ record.inode }}
Path: {{ record.path }}
Metadata:
Active: {{ record.is_active }}
{% if record.is_directory %}\
Type: directory\
{% else %}\
Type: file\
{% endif %}
Flags: {{ record.standard_information.flags|join(', ') }}
$SI Modified: {{ record.standard_information.modified }}
$SI Accessed: {{ record.standard_information.accessed }}
$SI Changed: {{ record.standard_information.changed }}
$SI Birthed: {{ record.standard_information.created }}
Owner ID: {{ record.standard_information.owner_id }}
Security ID: {{ record.standard_information.security_id }}
Quota charged: {{ record.standard_information.quota_charged }}
USN: {{ record.standard_information.usn }}
Filenames: \
{% for filename in record.filenames %}
Type: {{ filename.type }}
Name: {{ filename.name }}
Flags: {{ filename.flags|join(', ') }}
Logical size: {{ filename.logical_size }}
Physical size: {{ filename.physical_size }}
Modified: {{ filename.modified }}
Accessed: {{ filename.accessed }}
Changed: {{ filename.changed }}
Birthed: {{ filename.created }}
Parent reference: {{ filename.parent_ref }}
Parent sequence number: {{ filename.parent_seq }}\
{% endfor %}
Attributes: \
{% for attribute in record.attributes %}
Type: {{ attribute.type }}
Name: {{ attribute.name }}
Flags: {{ attribute.flags|join(', ') }}
Resident: {{ attribute.is_resident }}
Data size: {{ attribute.data_size }}
Allocated size: {{ attribute.allocated_size }}
Value size: {{ attribute.value_size }} \
{% if attribute.runs %}
Data runs: {% for run in attribute.runs %}
Offset (clusters): {{ run.offset }} Length (clusters): {{ run.length }} \
{% endfor %}\
{% endif %}\
{% endfor %}
INDX root entries:\
{% if not record.indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
INDX root slack entries:\
{% if not record.slack_indx_entries %}\
<none>\
{% endif %}\
{% for indx in record.slack_indx_entries %}
Name: {{ indx.filename }}
Size: {{ indx.size }}
Modified: {{ indx.modified }}
Accessed: {{ indx.accessed }}
Changed: {{ indx.changed }}
Birthed: {{ indx.created }}
Reference: {{ indx.inode }}
Sequence number: {{ indx.sequence_num }}\
{% endfor %}
Timeline:
{% for entry in record.timeline %}\
{{ "%-30s%-12s%-8s%s"|format(entry.timestamp, entry.type, entry.source, entry.path) }}
{% endfor %}\
Active strings:
ASCII strings:
{% for string in record.active_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.active_unicode_strings %}\
{{ string }}
{% endfor %}\
Slack strings:
ASCII strings:
{% for string in record.slack_ascii_strings %}\
{{ string }}
{% endfor %}\
Unicode strings:
{% for string in record.slack_unicode_strings %}\
{{ string }}
{% endfor %}\
""")
return template.render(record=make_model(record, path))
def print_indx_info(record, path):
print format_record(record, path)
def main():
parser = argparse.ArgumentParser(description='Inspect '
'a given MFT file record.')
parser.add_argument('-a', action="store", metavar="cache_size", type=int,
dest="cache_size", default=1024,
help="Size of cache.")
parser.add_argument('-p', action="store", metavar="prefix",
nargs=1, dest="prefix", default="\\.",
help="Prefix paths with `prefix` rather than \\.\\")
parser.add_argument('-v', action="store_true", dest="verbose",
help="Print debugging information")
parser.add_argument('mft', action="store",
help="Path to MFT")
parser.add_argument('record_or_path', action="store",
help="MFT record or file path to inspect")
results = parser.parse_args()
if results.verbose:
logging.basicConfig(level=logging.DEBUG)
with Mmap(results.mft) as buf:
record_cache = Cache(results.cache_size)
path_cache = Cache(results.cache_size)
enum = MFTEnumerator(buf,
record_cache=record_cache,
path_cache=path_cache)
should_use_inode = False
try:
record_num = int(results.record_or_path)
should_use_inode = True
except ValueError:
should_use_inode = False
if should_use_inode:
record = enum.get_record(record_num)
path = results.prefix + enum.get_path(record)
print_indx_info(record, path)
else:
path = results.record_or_path
record = enum.get_record_by_path(path)
print_indx_info(record, results.prefix + path)
if __name__ == "__main__":
main()
|
|
# Two TCP flows between two host pairs; both flows go through the same
# bottleneck but the delay is different
#
# $Id: $
import sys
import datetime
from fabric.api import env
#
# Fabric config
#
# User and password
env.user = 'root'
env.password = 'rootpw'
# Set shell used to execute commands
env.shell = '/bin/sh -c'
#
# Testbed config
#
# Path to teacup scripts
TPCONF_script_path = '/home/teacup/teacup-0.8'
# DO NOT remove the following line
sys.path.append(TPCONF_script_path)
# Set debugging level (0 = no debugging info output)
TPCONF_debug_level = 0
# Host lists
TPCONF_router = ['newtcprt3', ]
TPCONF_hosts = [ 'newtcp20', 'newtcp21', 'newtcp27', 'newtcp28', ]
# Map external IPs to internal IPs
TPCONF_host_internal_ip = {
'newtcprt3': ['172.16.10.1', '172.16.11.1'],
'newtcp20': ['172.16.10.60'],
'newtcp21': ['172.16.10.61'],
'newtcp27': ['172.16.11.67'],
'newtcp28': ['172.16.11.68'],
}
#
# Reboot configuration
#
#
# Experiment settings
#
# Maximum allowed time difference between machines in seconds
# otherwise the experiment will abort because of synchronisation problems
TPCONF_max_time_diff = 1
# Experiment name prefix used if not set on the command line
# The command line setting will overrule this config setting
now = datetime.datetime.today()
TPCONF_test_id = now.strftime("%Y%m%d-%H%M%S") + '_scenario4'
# Directory to store log files on remote host
TPCONF_remote_dir = '/tmp/'
# Time offset measurement traffic
# Enable broadcast ping on external/control interfaces
TPCONF_bc_ping_enable = '0'
# Specify rate of pings in packets/second
TPCONF_bc_ping_rate = 1
# Specify multicast address to use (must be broadcast or multicast address)
# If this is not specified, by default the ping will be sent to the subnet
# broadcast address.
TPCONF_bc_ping_address = '224.0.1.199'
#
# List of router queues/pipes
#
# Each entry is a tuple. The first value is the queue number and the second value
# is a comma separated list of parameters (see routersetup.py:init_pipe()).
# Queue numbers must be unique.
# Note that variable parameters must be either constants or variable names
# defined by the experimenter. Variables are evaluated at runtime. Variable
# names must start with a 'V_'. Parameter names can only contain numbers, letters
# (upper and lower case), underscores (_), and hyphens/minus signs (-).
# All variables must be defined in TPCONF_parameter_list (see below).
# Note that parameters must be configured appropriately for the router OS, e.g. there
# is no CoDel on FreeBSD; otherwise the experiment will abort with an error.
TPCONF_router_queues = [
# Different delays
('1', " source='172.16.10.60', dest='172.16.11.67', delay=V_delay, "
" loss=V_loss, rate=V_up_rate, queue_disc=V_aqm, queue_size=V_bsize "),
('2', " source='172.16.11.67', dest='172.16.10.60', delay=V_delay, "
" loss=V_loss, rate=V_down_rate, queue_disc=V_aqm, queue_size=V_bsize "),
('3', " source='172.16.10.61', dest='172.16.11.68', delay=V_delay2, "
" loss=V_loss, rate=V_up_rate, queue_disc=V_aqm, queue_size=V_bsize, "
" attach_to_queue='1' "),
('4', " source='172.16.11.68', dest='172.16.10.61', delay=V_delay2, "
" loss=V_loss, rate=V_down_rate, queue_disc=V_aqm, queue_size=V_bsize, "
" attach_to_queue='2' "),
]
#
# List of traffic generators
#
# Each entry is a 3-tuple. The first value of the tuple must be a float and is the
# time relative to the start of the experiment when the task is executed. If two tasks
# have the same start time their start order is arbitrary. The second entry of the
# tuple is the task number and must be a unique integer (used as ID for the process).
# The last value of the tuple is a comma separated list of parameters (see the tasks
# defined in trafficgens.py); the first parameter of this list must be the
# task name.
# Client and server can be specified using the external/control IP addresses or host
# names. Then the actual interface used is the _first_ internal address (according to
# TPCONF_host_internal_ip). Alternatively, client and server can be specified as
# internal addresses, which allows any of the configured internal interfaces to be
# used (an illustrative example follows the traffic_iperf list below).
traffic_iperf = [
# Specifying external addresses traffic will be created using the _first_
# internal addresses (according to TPCONF_host_internal_ip)
('0.0', '1', " start_iperf, client='newtcp27', server='newtcp20', port=5000, "
" duration=V_duration "),
('0.0', '2', " start_iperf, client='newtcp28', server='newtcp21', port=5001, "
" duration=V_duration "),
]
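# Hedged alternative example (illustrative, not used below): hosts can also be
# given by their internal addresses from TPCONF_host_internal_ip, e.g.
#   ('0.0', '1', " start_iperf, client='172.16.11.67', server='172.16.10.60', "
#                " port=5000, duration=V_duration "),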
# THIS is the traffic generator setup we will use
TPCONF_traffic_gens = traffic_iperf
#
# Traffic parameters
#
# Duration in seconds of traffic
TPCONF_duration = 30
# Number of runs for each setting
TPCONF_runs = 1
# TCP congestion control algorithm used
# Possible algos are: default, host<N>, newreno, cubic, cdg, hd, htcp, compound, vegas
# Note that the algo support is OS specific, so you must ensure the right OS is booted
# Windows: newreno (default), compound
# FreeBSD: newreno (default), cubic, hd, htcp, cdg, vegas
# Linux: newreno, cubic (default), htcp, vegas
# Mac: newreno
# If you specify 'default' the default algorithm depending on the OS will be used
# If you specify 'host<N>' where <N> is an integer starting from 0, then the
# algorithm will be the N-th algorithm specified for the host in TPCONF_host_TCP_algos
# (in case <N> is larger than the number of algorithms specified, it is set to 0)
TPCONF_TCP_algos = ['newreno', ]
# Specify TCP congestion control algorithms used on each host
TPCONF_host_TCP_algos = {
}
# Specify TCP parameters for each host and each TCP congestion control algorithm
# Each parameter is of the form <sysctl name> = <value> where <value> can be a constant
# or a V_ variable
TPCONF_host_TCP_algo_params = {
}
# Specify arbitrary commands that are executed on a host at the end of the host
# initialisation (after general host setup, ecn and tcp setup). The commands are
# executed in the shell as written after any V_ variables have been replaced.
# LIMITATION: only one V_ variable per command
TPCONF_host_init_custom_cmds = {
}
# Emulated delays in ms
TPCONF_delays = [5, 50]
TPCONF_delays2 = [5, 50]
# Emulated loss rates
TPCONF_loss_rates = [0]
# Emulated bandwidths (downstream, upstream)
TPCONF_bandwidths = [
('8mbit', '1mbit'),
('20mbit', '1.4mbit'),
]
# AQM
# Linux: fifo (mapped to pfifo), pfifo, bfifo, fq_codel, codel, pie, red, ...
# (see tc man page for full list)
# FreeBSD: fifo, red
TPCONF_aqms = ['pfifo', ]
# Buffer size
# If router is Linux this is mostly in packets/slots, but it depends on AQM
# (e.g. for bfifo it's bytes)
# If router is FreeBSD this would be in slots by default, but we can specify byte sizes
# (e.g. we can specify 4Kbytes)
TPCONF_buffer_sizes = [100]
#
# List of all parameters that can be varied and default values
#
# The key of each item is the identifier that can be used in TPCONF_vary_parameters
# (see below).
# The value of each item is a 4-tuple. First, a list of variable names.
# Second, a list of short names used for the file names.
# For each parameter varied a string '_<short_name>_<value>' is appended to the log
# file names (appended to the chosen prefix). Note, short names should only contain letters
# from a-z or A-Z. Do not use underscores or hyphens!
# Third, the list of parameter values. If there is more than one variable this must
# be a list of tuples, each tuple having the same number of items as the number of
# variables. Fourth, an optional dictionary with additional variables, where the keys
# are the variable names and the values are the variable values.
TPCONF_parameter_list = {
# Vary name V_ variable file name values extra vars
'delays' : (['V_delay'], ['del1'], TPCONF_delays, {}),
'delays2' : (['V_delay2'], ['del2'], TPCONF_delays2, {}),
'loss' : (['V_loss'], ['loss'], TPCONF_loss_rates, {}),
'tcpalgos' : (['V_tcp_cc_algo'],['tcp'], TPCONF_TCP_algos, {}),
'aqms' : (['V_aqm'], ['aqm'], TPCONF_aqms, {}),
'bsizes' : (['V_bsize'], ['bs'], TPCONF_buffer_sizes, {}),
'runs' : (['V_runs'], ['run'], range(TPCONF_runs), {}),
'bandwidths' : (['V_down_rate', 'V_up_rate'], ['down', 'up'], TPCONF_bandwidths, {}),
}
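# Hedged example of the resulting file naming (illustrative values only): when
# 'delays' and 'bandwidths' are varied with V_delay=5, down='8mbit', up='1mbit',
# a suffix like '_del1_5_down_8mbit_up_1mbit' is appended to the test ID prefix.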
# Default setting for variables (used for variables if not varied)
# The key of each item is the parameter name. The value of each item is the default
# parameter value used if the variable is not varied.
TPCONF_variable_defaults = {
# V_ variable value
'V_duration' : TPCONF_duration,
'V_delay' : TPCONF_delays[0],
'V_delay2' : TPCONF_delays2[0],
'V_loss' : TPCONF_loss_rates[0],
'V_tcp_cc_algo' : TPCONF_TCP_algos[0],
'V_down_rate' : TPCONF_bandwidths[0][0],
'V_up_rate' : TPCONF_bandwidths[0][1],
'V_aqm' : TPCONF_aqms[0],
'V_bsize' : TPCONF_buffer_sizes[0],
}
# Specify the parameters we vary through all values, all others will be fixed
# according to TPCONF_variable_defaults
TPCONF_vary_parameters = ['delays', 'delays2', 'bandwidths', 'aqms', 'runs',]
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint
import re
def set_new_name(doc):
"""Sets the `name`` property for the document based on various rules.
1. If amened doc, set suffix.
3. If `autoname` method is declared, then call it.
4. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
2. If `name` is already defined, use that name
5. If no rule defined, use hash.
#### Note:
:param doc: Document to be named."""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname or ""
if autoname.lower() != "prompt" and not frappe.flags.in_import:
doc.name = None
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
else:
doc.run_method("autoname")
if not doc.name and autoname:
if autoname.startswith('field:'):
fieldname = autoname[6:]
doc.name = (doc.get(fieldname) or "").strip()
if not doc.name:
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
raise Exception, 'Name is required'
if autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif "#" in autoname:
doc.name = make_autoname(autoname)
elif autoname.lower()=='prompt':
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via prompt"))
if not doc.name or autoname=='hash':
doc.name = make_autoname('hash', doc.doctype)
doc.name = validate_name(doc.doctype, doc.name, frappe.get_meta(doc.doctype).get_field("name_case"))
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+'.#####', '', doc)
def make_autoname(key='', doctype='', doc=''):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE/./.YY./.MM./.##### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key=="hash":
return frappe.generate_hash(doctype, 10)
if not "#" in key:
key = key + ".#####"
elif not "." in key:
frappe.throw(_("Invalid naming series (. missing)") + (_(" for {0}").format(doctype) if doctype else ""))
n = ''
l = key.split('.')
series_set = False
today = now_datetime()
for e in l:
part = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
part = getseries(n, digits, doctype)
series_set = True
elif e=='YY':
part = today.strftime('%y')
elif e=='MM':
part = today.strftime('%m')
elif e=='DD':
part = today.strftime("%d")
elif e=='YYYY':
part = today.strftime('%Y')
elif doc and doc.get(e):
part = doc.get(e)
else: part = e
if isinstance(part, basestring):
n+=part
return n
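# A hedged usage sketch (illustrative only; getseries() below needs a live
# Frappe site/database because it reads and updates the `tabSeries` table):
#   make_autoname('hash', 'ToDo')        # -> a random 10-character hash
#   make_autoname('INV-.YYYY.-.#####')   # -> e.g. 'INV-2015-00001'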
def getseries(key, digits, doctype=''):
# series created ?
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("update tabSeries set current = current+1 where name=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
def revert_series_if_last(key, name):
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
return
else:
prefix = key
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("update tabSeries set current=current-1 where name=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name: return 'No Name Specified for %s' % doctype
if name.startswith('New '+doctype):
frappe.throw(_('There were some errors setting the name, please contact the administrator'), frappe.NameError)
if case=='Title Case': name = name.title()
if case=='UPPER CASE': name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name!="DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
special_characters = "<>"
if re.findall("[{0}]+".format(special_characters), name):
message = ", ".join("'{0}'".format(c) for c in special_characters)
frappe.throw(_("Name cannot contain special characters like {0}").format(message), frappe.NameError)
return name
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split('-')[-1]) + 1
am_prefix = '-'.join(doc.amended_from.split('-')[:-1]) # except the last hyphen
doc.name = am_prefix + '-' + str(am_id)
return doc.name
def append_number_if_name_exists(doc):
if frappe.db.exists(doc.doctype, doc.name):
last = frappe.db.sql("""select name from `tab{}`
where name regexp '^{}-[[:digit:]]+'
order by length(name) desc, name desc limit 1""".format(doc.doctype, doc.name))
if last:
count = str(cint(last[0][0].rsplit("-", 1)[1]) + 1)
else:
count = "1"
doc.name = "{0}-{1}".format(doc.name, count)
return doc
def de_duplicate(doctype, name):
original_name = name
count = 0
while True:
if frappe.db.exists(doctype, name):
count += 1
name = "{0}-{1}".format(original_name, count)
else:
break
return name
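# Illustrative usage (assumes a configured frappe site): de_duplicate("ToDo", "Review budget")
# returns "Review budget" unchanged if no ToDo with that name exists, otherwise it tries
# "Review budget-1", "Review budget-2", ... until an unused name is found.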
|
|
# -*- coding: utf-8 -*-
"""App utilities: Compat settings, bug-report tool, pickling apps."""
from __future__ import absolute_import, unicode_literals
import os
import platform as _platform
import re
from collections import Mapping, namedtuple
from copy import deepcopy
from types import ModuleType
from kombu.utils.url import maybe_sanitize_url
from celery.exceptions import ImproperlyConfigured
from celery.five import items, keys, string_t, values
from celery.platforms import pyimplementation
from celery.utils.collections import ConfigurationView
from celery.utils.imports import import_from_cwd, qualname, symbol_by_name
from celery.utils.text import pretty
from .defaults import (_OLD_DEFAULTS, _OLD_SETTING_KEYS, _TO_NEW_KEY,
_TO_OLD_KEY, DEFAULTS, SETTING_KEYS, find)
__all__ = (
'Settings', 'appstr', 'bugreport',
'filter_hidden_settings', 'find_app',
)
#: Format used to generate bug-report information.
BUGREPORT_INFO = """
software -> celery:{celery_v} kombu:{kombu_v} py:{py_v}
billiard:{billiard_v} {driver_v}
platform -> system:{system} arch:{arch} imp:{py_i}
loader -> {loader}
settings -> transport:{transport} results:{results}
{human_settings}
"""
HIDDEN_SETTINGS = re.compile(
'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE',
re.IGNORECASE,
)
E_MIX_OLD_INTO_NEW = """
Cannot mix new and old setting keys, please rename the
following settings to the new format:
{renames}
"""
E_MIX_NEW_INTO_OLD = """
Cannot mix new setting names with old setting names, please
rename the following settings to use the old format:
{renames}
Or change all of the settings to use the new format :)
"""
FMT_REPLACE_SETTING = '{replace:<36} -> {with_}'
def appstr(app):
"""String used in __repr__ etc, to id app instances."""
return '{0} at {1:#x}'.format(app.main or '__main__', id(app))
class Settings(ConfigurationView):
"""Celery settings object.
.. seealso::
:ref:`configuration` for a full list of configuration keys.
"""
@property
def broker_read_url(self):
return (
os.environ.get('CELERY_BROKER_READ_URL') or
self.get('broker_read_url') or
self.broker_url
)
@property
def broker_write_url(self):
return (
os.environ.get('CELERY_BROKER_WRITE_URL') or
self.get('broker_write_url') or
self.broker_url
)
@property
def broker_url(self):
return (
os.environ.get('CELERY_BROKER_URL') or
self.first('broker_url', 'broker_host')
)
@property
def result_backend(self):
return (
os.environ.get('CELERY_RESULT_BACKEND') or
self.first('result_backend', 'CELERY_RESULT_BACKEND')
)
@property
def task_default_exchange(self):
return self.first(
'task_default_exchange',
'task_default_queue',
)
@property
def task_default_routing_key(self):
return self.first(
'task_default_routing_key',
'task_default_queue',
)
@property
def timezone(self):
# this way we also support django's time zone.
return self.first('timezone', 'time_zone')
def without_defaults(self):
"""Return the current configuration, but without defaults."""
# the last stash is the default settings, so just skip that
return Settings({}, self.maps[:-1])
def value_set_for(self, key):
return key in self.without_defaults()
def find_option(self, name, namespace=''):
"""Search for option by name.
Example:
>>> from proj.celery import app
>>> app.conf.find_option('disable_rate_limits')
('worker', 'prefetch_multiplier',
<Option: type->bool default->False>))
Arguments:
name (str): Name of option, cannot be partial.
namespace (str): Preferred name-space (``None`` by default).
Returns:
Tuple: of ``(namespace, key, type)``.
"""
return find(name, namespace)
def find_value_for_key(self, name, namespace='celery'):
"""Shortcut to ``get_by_parts(*find_option(name)[:-1])``."""
return self.get_by_parts(*self.find_option(name, namespace)[:-1])
def get_by_parts(self, *parts):
"""Return the current value for setting specified as a path.
Example:
>>> from proj.celery import app
>>> app.conf.get_by_parts('worker', 'disable_rate_limits')
False
"""
return self['_'.join(part for part in parts if part)]
def finalize(self):
# See PendingConfiguration in celery/app/base.py
# first access will read actual configuration.
try:
self['__bogus__']
except KeyError:
pass
return self
def table(self, with_defaults=False, censored=True):
filt = filter_hidden_settings if censored else lambda v: v
dict_members = dir(dict)
self.finalize()
return filt({
k: v for k, v in items(
self if with_defaults else self.without_defaults())
if not k.startswith('_') and k not in dict_members
})
def humanize(self, with_defaults=False, censored=True):
"""Return a human readable text showing configuration changes."""
return '\n'.join(
'{0}: {1}'.format(key, pretty(value, width=50))
for key, value in items(self.table(with_defaults, censored)))
def _new_key_to_old(key, convert=_TO_OLD_KEY.get):
return convert(key, key)
def _old_key_to_new(key, convert=_TO_NEW_KEY.get):
return convert(key, key)
_settings_info_t = namedtuple('settings_info_t', (
'defaults', 'convert', 'key_t', 'mix_error',
))
_settings_info = _settings_info_t(
DEFAULTS, _TO_NEW_KEY, _old_key_to_new, E_MIX_OLD_INTO_NEW,
)
_old_settings_info = _settings_info_t(
_OLD_DEFAULTS, _TO_OLD_KEY, _new_key_to_old, E_MIX_NEW_INTO_OLD,
)
def detect_settings(conf, preconf={}, ignore_keys=set(), prefix=None,
all_keys=SETTING_KEYS, old_keys=_OLD_SETTING_KEYS):
source = conf
if conf is None:
source, conf = preconf, {}
have = set(keys(source)) - ignore_keys
is_in_new = have.intersection(all_keys)
is_in_old = have.intersection(old_keys)
info = None
if is_in_new:
# have new setting names
info, left = _settings_info, is_in_old
if is_in_old and len(is_in_old) > len(is_in_new):
# Majority of the settings are old.
info, left = _old_settings_info, is_in_new
if is_in_old:
# have old setting names, or a majority of the names are old.
if not info:
info, left = _old_settings_info, is_in_new
if is_in_new and len(is_in_new) > len(is_in_old):
# Majority of the settings are new
info, left = _settings_info, is_in_old
else:
# no settings, just use new format.
info, left = _settings_info, is_in_old
if prefix:
# always use new format if prefix is used.
info, left = _settings_info, set()
# only raise error for keys that the user didn't provide two keys
# for (e.g., both ``result_expires`` and ``CELERY_TASK_RESULT_EXPIRES``).
really_left = {key for key in left if info.convert[key] not in have}
if really_left:
# user is mixing old/new, or new/old settings, give renaming
# suggestions.
raise ImproperlyConfigured(info.mix_error.format(renames='\n'.join(
FMT_REPLACE_SETTING.format(replace=key, with_=info.convert[key])
for key in sorted(really_left)
)))
preconf = {info.convert.get(k, k): v for k, v in items(preconf)}
defaults = dict(deepcopy(info.defaults), **preconf)
return Settings(
preconf, [conf, defaults],
(_old_key_to_new, _new_key_to_old),
prefix=prefix,
)
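# Illustrative example (hypothetical config, not from the original source): mixing a
# new-style key with a different old-style key, e.g.
#     config = {'task_default_queue': 'default',      # new-style name
#               'CELERY_TASK_RESULT_EXPIRES': 3600}   # old-style name
#     detect_settings(config)
# raises ImproperlyConfigured with a rename suggestion such as
#     CELERY_TASK_RESULT_EXPIRES -> result_expires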
class AppPickler(object):
"""Old application pickler/unpickler (< 3.1)."""
def __call__(self, cls, *args):
kwargs = self.build_kwargs(*args)
app = self.construct(cls, **kwargs)
self.prepare(app, **kwargs)
return app
def prepare(self, app, **kwargs):
app.conf.update(kwargs['changes'])
def build_kwargs(self, *args):
return self.build_standard_kwargs(*args)
def build_standard_kwargs(self, main, changes, loader, backend, amqp,
events, log, control, accept_magic_kwargs,
config_source=None):
return {'main': main, 'loader': loader, 'backend': backend,
'amqp': amqp, 'changes': changes, 'events': events,
'log': log, 'control': control, 'set_as_current': False,
'config_source': config_source}
def construct(self, cls, **kwargs):
return cls(**kwargs)
def _unpickle_app(cls, pickler, *args):
"""Rebuild app for versions 2.5+."""
return pickler()(cls, *args)
def _unpickle_app_v2(cls, kwargs):
"""Rebuild app for versions 3.1+."""
kwargs['set_as_current'] = False
return cls(**kwargs)
def filter_hidden_settings(conf):
"""Filter sensitive settings."""
def maybe_censor(key, value, mask='*' * 8):
if isinstance(value, Mapping):
return filter_hidden_settings(value)
if isinstance(key, string_t):
if HIDDEN_SETTINGS.search(key):
return mask
elif 'broker_url' in key.lower():
from kombu import Connection
return Connection(value).as_uri(mask=mask)
elif 'backend' in key.lower():
return maybe_sanitize_url(value, mask=mask)
return value
return {k: maybe_censor(k, v) for k, v in items(conf)}
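# Illustrative example: filter_hidden_settings({'database_password': 'hunter2',
#                                               'broker_url': 'amqp://user:secret@localhost//'})
# masks the password as '********' (the key matches HIDDEN_SETTINGS) and rewrites the
# broker URL with its credentials replaced by the same mask.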
def bugreport(app):
"""Return a string containing information useful in bug-reports."""
import billiard
import celery
import kombu
try:
conn = app.connection()
driver_v = '{0}:{1}'.format(conn.transport.driver_name,
conn.transport.driver_version())
transport = conn.transport_cls
except Exception: # pylint: disable=broad-except
transport = driver_v = ''
return BUGREPORT_INFO.format(
system=_platform.system(),
arch=', '.join(x for x in _platform.architecture() if x),
py_i=pyimplementation(),
celery_v=celery.VERSION_BANNER,
kombu_v=kombu.__version__,
billiard_v=billiard.__version__,
py_v=_platform.python_version(),
driver_v=driver_v,
transport=transport,
results=maybe_sanitize_url(app.conf.result_backend or 'disabled'),
human_settings=app.conf.humanize(),
loader=qualname(app.loader.__class__),
)
def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
"""Find app by name."""
from .base import Celery
try:
sym = symbol_by_name(app, imp=imp)
except AttributeError:
# last part was not an attribute, but a module
sym = imp(app)
if isinstance(sym, ModuleType) and ':' not in app:
try:
found = sym.app
if isinstance(found, ModuleType):
raise AttributeError()
except AttributeError:
try:
found = sym.celery
if isinstance(found, ModuleType):
raise AttributeError()
except AttributeError:
if getattr(sym, '__path__', None):
try:
return find_app(
'{0}.celery'.format(app),
symbol_by_name=symbol_by_name, imp=imp,
)
except ImportError:
pass
for suspect in values(vars(sym)):
if isinstance(suspect, Celery):
return suspect
raise
else:
return found
else:
return found
return sym
|
|
from __future__ import unicode_literals
import time
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.views.generic import View, TemplateView, RedirectView
from . import views
class SimpleView(View):
"""
A simple view with a docstring.
"""
def get(self, request):
return HttpResponse('This is a simple view')
class SimplePostView(SimpleView):
post = SimpleView.get
class PostOnlyView(View):
def post(self, request):
return HttpResponse('This view only accepts POST')
class CustomizableView(SimpleView):
parameter = {}
def decorator(view):
view.is_decorated = True
return view
class DecoratedDispatchView(SimpleView):
@decorator
def dispatch(self, request, *args, **kwargs):
return super(DecoratedDispatchView, self).dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
def get(self, request):
return self.render_to_response({})
def get_template_names(self):
return ['generic_views/about.html']
class AboutTemplateAttributeView(TemplateView):
template_name = 'generic_views/about.html'
def get(self, request):
return self.render_to_response(context={})
class InstanceView(View):
def get(self, request):
return self
class ViewTest(unittest.TestCase):
rf = RequestFactory()
def _assert_simple(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'This is a simple view')
def test_no_init_kwargs(self):
"""
Test that a view can't be accidentally instantiated before deployment
"""
try:
SimpleView(key='value').as_view()
self.fail('Should not be able to instantiate a view')
except AttributeError:
pass
def test_no_init_args(self):
"""
Test that a view can't be accidentally instantiated before deployment
"""
try:
SimpleView.as_view('value')
self.fail('Should not be able to use non-keyword arguments instantiating a view')
except TypeError:
pass
def test_pathological_http_method(self):
"""
The edge case of an HTTP request that spoofs an existing method name is caught.
"""
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='DISPATCH')
).status_code, 405)
def test_get_only(self):
"""
Test a view which only allows GET doesn't allow other methods.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_get_and_head(self):
"""
Test a view which supplies a GET method also responds correctly to HEAD.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
response = SimpleView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 200)
def test_head_no_get(self):
"""
Test a view which supplies no GET method responds to HEAD with HTTP 405.
"""
response = PostOnlyView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 405)
def test_get_and_post(self):
"""
Test a view which only allows both GET and POST.
"""
self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
self.assertEqual(SimplePostView.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_invalid_keyword_argument(self):
"""
Test that view arguments must be predefined on the class and can't
be named like an HTTP method.
"""
# Check each of the allowed method names
for method in SimpleView.http_method_names:
kwargs = dict(((method, "value"),))
self.assertRaises(TypeError, SimpleView.as_view, **kwargs)
# Check the case view argument is ok if predefined on the class...
CustomizableView.as_view(parameter="value")
# ...but raises errors otherwise.
self.assertRaises(TypeError, CustomizableView.as_view, foobar="value")
def test_calling_more_than_once(self):
"""
Test a view can only be called once.
"""
request = self.rf.get('/')
view = InstanceView.as_view()
self.assertNotEqual(view(request), view(request))
def test_class_attributes(self):
"""
Test that the callable returned from as_view() has proper
docstring, name and module.
"""
self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__)
self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__)
self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__)
def test_dispatch_decoration(self):
"""
Test that attributes set by decorators on the dispatch method
are also present on the closure.
"""
self.assertTrue(DecoratedDispatchView.as_view().is_decorated)
def test_options(self):
"""
Test that views respond to HTTP OPTIONS requests with an Allow header
appropriate for the methods implemented by the view class.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self.assertEqual(200, response.status_code)
self.assertTrue(response['Allow'])
def test_options_for_get_view(self):
"""
Test that a view implementing GET allows GET and HEAD.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD')
def test_options_for_get_and_post_view(self):
"""
Test that a view implementing GET and POST allows GET, HEAD, and POST.
"""
request = self.rf.options('/')
view = SimplePostView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD', 'POST')
def test_options_for_post_view(self):
"""
Test that a view implementing POST allows POST.
"""
request = self.rf.options('/')
view = PostOnlyView.as_view()
response = view(request)
self._assert_allows(response, 'POST')
def _assert_allows(self, response, *expected_methods):
"Assert allowed HTTP methods reported in the Allow response header"
response_allows = set(response['Allow'].split(', '))
self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)
def test_args_kwargs_request_on_self(self):
"""
Test a view only has args, kwargs & request once `as_view`
has been called.
"""
bare_view = InstanceView()
view = InstanceView.as_view()(self.rf.get('/'))
for attribute in ('args', 'kwargs', 'request'):
self.assertNotIn(attribute, dir(bare_view))
self.assertIn(attribute, dir(view))
class TemplateViewTest(TestCase):
urls = 'generic_views.urls'
rf = RequestFactory()
def _assert_about(self, response):
response.render()
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<h1>About</h1>')
def test_get(self):
"""
Test a view that simply renders a template on GET
"""
self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))
def test_head(self):
"""
Test a TemplateView responds correctly to HEAD
"""
response = AboutTemplateView.as_view()(self.rf.head('/about/'))
self.assertEqual(response.status_code, 200)
def test_get_template_attribute(self):
"""
Test a view that renders a template on GET with the template name as
an attribute on the class.
"""
self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/')))
def test_get_generic_template(self):
"""
Test a completely generic view that renders a template on GET
with the template name as an argument at instantiation.
"""
self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/')))
def test_template_name_required(self):
"""
A template view must provide a template name
"""
self.assertRaises(ImproperlyConfigured, self.client.get, '/template/no_template/')
def test_template_params(self):
"""
A generic template view passes kwargs as context.
"""
response = self.client.get('/template/simple/bar/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['foo'], 'bar')
self.assertIsInstance(response.context['view'], View)
def test_extra_template_params(self):
"""
A template view can be customized to return extra context.
"""
response = self.client.get('/template/custom/bar/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['foo'], 'bar')
self.assertEqual(response.context['key'], 'value')
self.assertIsInstance(response.context['view'], View)
def test_cached_views(self):
"""
A template view can be cached
"""
response = self.client.get('/template/cached/bar/')
self.assertEqual(response.status_code, 200)
time.sleep(1.0)
response2 = self.client.get('/template/cached/bar/')
self.assertEqual(response2.status_code, 200)
self.assertEqual(response.content, response2.content)
time.sleep(2.0)
# Let the cache expire and test again
response2 = self.client.get('/template/cached/bar/')
self.assertEqual(response2.status_code, 200)
self.assertNotEqual(response.content, response2.content)
def test_content_type(self):
response = self.client.get('/template/content_type/')
self.assertEqual(response['Content-Type'], 'text/plain')
class RedirectViewTest(TestCase):
urls = 'generic_views.urls'
rf = RequestFactory()
def test_no_url(self):
"Without any configuration, returns HTTP 410 GONE"
response = RedirectView.as_view()(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 410)
def test_permanent_redirect(self):
"Default is a permanent redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_temporary_redirect(self):
"Permanent redirects are an option"
response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_include_args(self):
"GET arguments can be included in the redirected URL"
response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
response = RedirectView.as_view(url='/bar/', query_string=True)(self.rf.get('/foo/?pork=spam'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/?pork=spam')
def test_include_urlencoded_args(self):
"GET arguments can be URL-encoded when included in the redirected URL"
response = RedirectView.as_view(url='/bar/', query_string=True)(
self.rf.get('/foo/?unicode=%E2%9C%93'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/?unicode=%E2%9C%93')
def test_parameter_substitution(self):
"Redirection URLs can be parameterized"
response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/42/')
def test_named_url_pattern(self):
"Named pattern parameter should reverse to the matching pattern"
response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), pk=1)
self.assertEqual(response.status_code, 301)
self.assertEqual(response['Location'], '/detail/artist/1/')
def test_named_url_pattern_using_args(self):
response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), 1)
self.assertEqual(response.status_code, 301)
self.assertEqual(response['Location'], '/detail/artist/1/')
def test_wrong_named_url_pattern(self):
"A wrong pattern name returns 410 GONE"
response = RedirectView.as_view(pattern_name='wrong.pattern_name')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 410)
def test_redirect_POST(self):
"Default is a permanent redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_redirect_HEAD(self):
"Default is a permanent redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_redirect_OPTIONS(self):
"Default is a permanent redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_redirect_PUT(self):
"Default is a permanent redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_redirect_PATCH(self):
"Default is a permanent redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.patch('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_redirect_DELETE(self):
"Default is a permanent redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.delete('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_redirect_when_meta_contains_no_query_string(self):
"regression for #16705"
# we can't use self.rf.get because it always sets QUERY_STRING
response = RedirectView.as_view(url='/bar/')(self.rf.request(PATH_INFO='/foo/'))
self.assertEqual(response.status_code, 301)
class GetContextDataTest(unittest.TestCase):
def test_get_context_data_super(self):
test_view = views.CustomContextView()
context = test_view.get_context_data(kwarg_test='kwarg_value')
# the test_name key is inserted by the test class's parent
self.assertTrue('test_name' in context)
self.assertEqual(context['kwarg_test'], 'kwarg_value')
self.assertEqual(context['custom_key'], 'custom_value')
# test that kwarg overrides values assigned higher up
context = test_view.get_context_data(test_name='test_value')
self.assertEqual(context['test_name'], 'test_value')
def test_object_at_custom_name_in_context_data(self):
# Checks 'pony' key presence in dict returned by get_context_data
test_view = views.CustomSingleObjectView()
test_view.context_object_name = 'pony'
context = test_view.get_context_data()
self.assertEqual(context['pony'], test_view.object)
def test_object_in_get_context_data(self):
# Checks 'object' key presence in dict returned by get_context_data #20234
test_view = views.CustomSingleObjectView()
context = test_view.get_context_data()
self.assertEqual(context['object'], test_view.object)
class UseMultipleObjectMixinTest(unittest.TestCase):
rf = RequestFactory()
def test_use_queryset_from_view(self):
test_view = views.CustomMultipleObjectMixinView()
test_view.get(self.rf.get('/'))
# Don't pass queryset as argument
context = test_view.get_context_data()
self.assertEqual(context['object_list'], test_view.queryset)
def test_overwrite_queryset(self):
test_view = views.CustomMultipleObjectMixinView()
test_view.get(self.rf.get('/'))
queryset = [{'name': 'Lennon'}, {'name': 'Ono'}]
self.assertNotEqual(test_view.queryset, queryset)
# Overwrite the view's queryset with queryset from kwarg
context = test_view.get_context_data(object_list=queryset)
self.assertEqual(context['object_list'], queryset)
class SingleObjectTemplateResponseMixinTest(unittest.TestCase):
def test_template_mixin_without_template(self):
"""
We want to make sure that if you use a template mixin, but forget the
template, it still tells you it's ImproperlyConfigured instead of
TemplateDoesNotExist.
"""
view = views.TemplateResponseWithoutTemplate()
self.assertRaises(ImproperlyConfigured, view.get_template_names)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions
"""
import code
import contextlib
import functools
import itertools
import os
import re
import sys
import traceback
try:
import readline as rlmodule
except ImportError: # pragma: no cover
rlmodule = None
try:
import termcolor as tcmodule
except ImportError: # pragma: no cover
tcmodule = None
__author__ = "Simone Campagna"
__copyright__ = 'Copyright (c) 2016 Simone Campagna'
__license__ = 'Apache License Version 2.0'
__all__ = (
'rlmodule',
'tcmodule',
'rl_set_completer',
'rl_escape_prompt',
'tabulate',
'make_title',
'filter_completion',
'get_element',
'input_function',
'swap_history',
'PyShell',
'rendered_text_len',
'traced',
)
# pylint: disable=too-few-public-methods
def configure_readline():
"""Configure the readline module"""
if rlmodule is not None: # pragma: no cover
delims = list(rlmodule.get_completer_delims())
for char in {'-', '"', "'", '!', '@', '?'}:
if char in delims:
delims.remove(char)
rlmodule.set_completer_delims(''.join(delims))
configure_readline()
_COMPLETER_STACK = []
@contextlib.contextmanager
def rl_set_completer(completer):
"""Context manager to temporarily replace the readline completer"""
if rlmodule is not None:
_COMPLETER_STACK.append(rlmodule.get_completer())
rlmodule.set_completer(completer)
try:
yield
finally:
if rlmodule is not None:
rlmodule.set_completer(_COMPLETER_STACK.pop(-1))
def rl_escape_prompt(prompt):
"""Add rl completer escape chars"""
regexp = re.compile(r'\x1b\[\d+m')
s_prompt = prompt
offset = 0
pre = '\001'
post = '\002'
for match in regexp.finditer(prompt):
begin, end = match.span()
s_prompt = ''.join([
s_prompt[:offset + begin],
pre,
s_prompt[offset + begin:offset + end],
post,
s_prompt[offset + end:]
])
offset += len(pre) + len(post)
return s_prompt
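# Illustrative example: rl_escape_prompt('\x1b[31m>>> \x1b[0m') wraps each ANSI colour
# code in readline's "non-printing" markers, yielding '\001\x1b[31m\002>>> \001\x1b[0m\002',
# so readline does not count the escape sequences when computing the prompt width.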
def grouper(iterable, number, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * number
return itertools.zip_longest(*args, fillvalue=fillvalue)
def rendered_text_len(text):
"""Returns the actual length of a text when displayed, by removing
all text formattings
Parameters
----------
text: str
the text to be rendered
Returns
-------
int
the actual rendered text length
"""
return len(re.sub(r'\x1b\[\d{1,2}m', '', text))
def make_title(title, fill="~", length=80):
"""Make a title"""
text = "{e:{fill}<3s} {t} ".format(t=title, fill=fill, e="")
return text + fill * max(0, length - rendered_text_len(text))
def tabulate(values, *, stream, title=None, footer=None, line_length=80):
"""Tabulate values"""
if title:
stream.write(title + '\n')
if values:
lmax = max(rendered_text_len(value) for value in values)
nmax = max(1, line_length // (lmax + 1))
fmt = "{{:<{lmax}s}}".format(lmax=lmax)
for subiter in grouper(values, nmax):
line = " ".join(fmt.format(item) for item in subiter if item is not None)
stream.write(line + '\n')
if footer:
stream.write(footer + '\n')
def filter_completion(text, sequence):
"""Return items starting with text
Parameters
----------
text: str
the completion text
sequence: iterable
a sequence of strings
Yields
------
str
a matching string
"""
for item in sequence:
if item.startswith(text):
yield item
def get_element(root, name):
"""Return the element named `name`.
Parameters
----------
root: Element
the base element
name: str
the element name; relative names like '.comm', '..grp.comm' are accepted.
Returns
-------
Element:
the requested element
"""
stripped_name = name.lstrip('.')
num_dots = max(0, len(name) - len(stripped_name) - 1)
owner = root
for dummy in range(num_dots):
owner = owner.parent
if owner is None:
raise KeyError("element {!r} not found in {}".format(name, root))
args = ()
element = owner
for subcmd in stripped_name.split("."):
subelement, subargs = element.get_element(subcmd)
if subelement is None:
raise KeyError("element {!r} not found in {}".format(subcmd, element))
element = subelement
args = args + tuple(subargs)
return element, args
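# Illustrative behaviour (assuming a hypothetical element tree): a single leading dot,
# as in get_element(root, '.comm'), resolves relative to `root` itself, while each extra
# dot walks one parent up, so '..grp.comm' resolves 'grp.comm' against root.parent; the
# returned tuple also accumulates the arguments yielded by each element's get_element().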
def input_function(prompt="", history=True):
"""Reads an input line, with readline disabled"""
remove_history = rlmodule is not None and not history
if remove_history:
hcurlen = rlmodule.get_current_history_length()
hmaxlen = rlmodule.get_history_length()
rlmodule.set_history_length(hmaxlen + 1)
try:
answer = input(prompt)
finally:
if remove_history:
hlen = rlmodule.get_current_history_length()
for i in range(hcurlen, hlen):
rlmodule.remove_history_item(i)
rlmodule.set_history_length(hmaxlen)
return answer
@contextlib.contextmanager
def swap_history(filename=None, history_length=100):
"""Pause the current readline buffer and starts a new one"""
if rlmodule is None: # pragma: no cover
yield
return
else:
saved_history_length = rlmodule.get_history_length()
history = []
for i in range(1, rlmodule.get_current_history_length() + 1):
history.append(rlmodule.get_history_item(i))
rlmodule.clear_history()
try:
if filename:
if os.path.exists(filename):
rlmodule.read_history_file(filename)
rlmodule.set_history_length(history_length)
yield
if filename:
rlmodule.write_history_file(filename)
finally:
rlmodule.clear_history()
for line in history:
rlmodule.add_history(line)
rlmodule.set_history_length(saved_history_length)
@contextlib.contextmanager
def update_attrs(obj, **kwargs):
"""Context manager for attribute update"""
undefined = object()
state = {}
for attr, new_value in kwargs.items():
old_value = getattr(obj, attr, undefined)
state[attr] = old_value
setattr(obj, attr, new_value)
try:
yield
finally:
for attr, old_value in state.items():
if old_value is undefined:
delattr(obj, attr)
else:
setattr(obj, attr, old_value)
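# Illustrative usage: temporarily override an attribute and restore (or delete) it on exit:
#     with update_attrs(sys, ps1='myshell> '):
#         ...                      # sys.ps1 is 'myshell> ' inside the block
#     # afterwards sys.ps1 is restored, or removed if it did not exist beforehand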
@contextlib.contextmanager
def if_contextmanager(condition, if_context, else_context=None):
"""Select a context manager based on a condition
Parameters
----------
condition: bool
the condition
if_context: context manager
context manager to be executed if `condition` is True
else_context: context manager
context manager to be executed if `condition` is False
Yields
------
the selected context
"""
context = None
if condition:
context = if_context
else:
context = else_context
if context is None:
yield
else:
with context as ctx:
yield ctx
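# Illustrative usage: if_contextmanager(history_filename, swap_history(history_filename))
# enters swap_history() only when a history filename is given and is a no-op otherwise;
# this is exactly how PyShell.__call__ uses it below.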
class PyShell(object):
"""Python shell
Parameters
----------
stdin: file, optional
standard input stream (defaults to ``sys.stdin``)
stdout: file, optional
standard output stream (defaults to ``sys.stdout``)
stderr: file, optional
standard error stream (defaults to ``sys.stderr``)
local_variables: dict, optional
a dictionary of local variables
ps1: str, optional
the PS1 prompt (defaults to ">>>")
ps2: str, optional
the PS2 prompt (defaults to "...")
banner: str, optional
the shell banner (defaults to ``None``, which selects a standard Python banner)
history_filename: str, optional
the history filename (defaults to None)
init: sequence of str, optional
list of initialization statements (defaults to ``()``)
"""
def __init__(self, *,
stdin=None, stdout=None, stderr=None, local_variables=None,
ps1=">>> ", ps2="... ", banner=None, history_filename=None,
init=()):
self._stdin = stdin or sys.stdin
self._stdout = stdout or sys.stdout
self._stderr = stderr or sys.stderr
if local_variables is None:
local_variables = {}
self._local_variables = local_variables
self._ps1 = rl_escape_prompt(ps1)
self._ps2 = rl_escape_prompt(ps2)
self._banner = banner
self._history_filename = history_filename
self._init = init
@property
def history_filename(self):
"Get the history filename"
return self._history_filename
@history_filename.setter
def history_filename(self, value):
"Set the history filename"
self._history_filename = value
@property
def banner(self):
"Get the banner"
return self._banner
@banner.setter
def banner(self, value):
"Set the banner"
self._banner = value
@property
def ps1(self):
"Get the PS1 prompt"
return self._ps1
@ps1.setter
def ps1(self, value):
"Set the PS1 prompt"
self._ps1 = value
@property
def ps2(self):
"Get the PS2 prompt"
return self._ps2
@ps2.setter
def ps2(self, value):
"Set the PS2 prompt"
self._ps2 = value
@property
def stdin(self):
"Get the stdin file"
return self._stdin
@stdin.setter
def stdin(self, value):
"Set the stdin file"
self._stdin = value
@property
def stdout(self):
"Get the stdout file"
return self._stdout
@stdout.setter
def stdout(self, value):
"Set the stdout file"
self._stdout = value
@property
def stderr(self):
"Get the stderr file"
return self._stderr
@stderr.setter
def stderr(self, value):
"Set the stderr file"
self._stderr = value
@property
def local_variables(self):
"Get the local_variables dict"
return self._local_variables
@local_variables.setter
def local_variables(self, value):
"Set the local_variables dict"
self._local_variables = value
def get_banner(self):
"Return a banner"
if self._banner is None:
return """\
Python {version} on {platform}
Type "help", "copyright", "credits" or "license" for more information.
""".format(version=sys.version, platform=sys.platform)
else:
return self._banner
def __call__(self, *args):
local_variables = self._local_variables
def quit(): # pylint: disable=redefined-builtin
"Exit from the console"
raise SystemExit()
remove_keys = []
for key in 'quit', 'exit':
if key not in local_variables:
local_variables[key] = quit
remove_keys.append(key)
try:
with update_attrs(sys, ps1=self._ps1,
ps2=self._ps2,
stdin=self._stdin or sys.stdin,
stdout=self._stdout or sys.stdout,
stderr=self._stderr or sys.stderr):
py_interpreter = code.InteractiveConsole(local_variables)
if args:
for arg in args:
py_interpreter.runcode(arg)
else:
history_filename = self._history_filename
with if_contextmanager(history_filename, swap_history(history_filename)):
try:
py_interpreter.interact(banner=self.get_banner())
except SystemExit:
pass
finally:
for key in remove_keys:
if key in local_variables:
del local_variables[key]
return local_variables
def traced(fun): # pragma: no cover
"""Decorator tracing exceptions"""
@functools.wraps(fun)
def trfun(*args, **kwargs):
"""Decorated function"""
try:
return fun(*args, **kwargs)
except: # pylint: disable=bare-except
traceback.print_exc()
raise
return trfun
def identity(text):
"""Identity function"""
return text
|
|
import datetime
from dateutil.rrule import rrule, YEARLY, MONTHLY, WEEKLY, DAILY
from operator import itemgetter
from django.conf import settings
from django.urls import reverse
from django.db import models
from django.db.models import Q
from django.template.defaultfilters import date
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from cms.models import ContentManageable, NameSlugModel
from markupfield.fields import MarkupField
from .utils import (
minutes_resolution, convert_dt_to_aware, timedelta_nice_repr, timedelta_parse,
)
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'restructuredtext')
class Calendar(ContentManageable):
url = models.URLField('URL iCal', blank=True, null=True)
rss = models.URLField('RSS Feed', blank=True, null=True)
embed = models.URLField('URL embed', blank=True, null=True)
twitter = models.URLField('Twitter feed', blank=True, null=True)
name = models.CharField(max_length=100)
slug = models.SlugField(unique=True)
description = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('events:event_list', kwargs={'calendar_slug': self.slug})
def import_events(self):
if self.url is None:
raise ValueError("calendar must have a url field set")
from .importer import ICSImporter
importer = ICSImporter(calendar=self)
importer.import_events()
class EventCategory(NameSlugModel):
calendar = models.ForeignKey(
Calendar,
related_name='categories',
null=True,
blank=True,
on_delete=models.CASCADE,
)
class Meta:
verbose_name_plural = 'event categories'
ordering = ('name',)
def get_absolute_url(self):
return reverse('events:eventlist_category', kwargs={'calendar_slug': self.calendar.slug, 'slug': self.slug})
class EventLocation(models.Model):
calendar = models.ForeignKey(
Calendar,
related_name='locations',
null=True,
blank=True,
on_delete=models.CASCADE,
)
name = models.CharField(max_length=255)
address = models.CharField(blank=True, null=True, max_length=255)
url = models.URLField('URL', blank=True, null=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('events:eventlist_location', kwargs={'calendar_slug': self.calendar.slug, 'pk': self.pk})
class EventManager(models.Manager):
def for_datetime(self, dt=None):
if dt is None:
dt = timezone.now()
else:
dt = convert_dt_to_aware(dt)
return self.filter(Q(occurring_rule__dt_start__gt=dt) | Q(recurring_rules__finish__gt=dt))
def until_datetime(self, dt=None):
if dt is None:
dt = timezone.now()
else:
dt = convert_dt_to_aware(dt)
return self.filter(Q(occurring_rule__dt_end__lt=dt) | Q(recurring_rules__begin__lt=dt))
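# Illustrative behaviour: Event.objects.for_datetime(dt) returns events whose single
# occurrence starts after `dt` or whose recurring rule has not yet finished, while
# until_datetime(dt) is the mirror image for events that are already in the past.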
class Event(ContentManageable):
uid = models.CharField(max_length=200, null=True, blank=True)
title = models.CharField(max_length=200)
calendar = models.ForeignKey(Calendar, related_name='events', on_delete=models.CASCADE)
description = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, escape_html=False)
venue = models.ForeignKey(
EventLocation,
related_name='events',
null=True,
blank=True,
on_delete=models.CASCADE,
)
categories = models.ManyToManyField(EventCategory, related_name='events', blank=True)
featured = models.BooleanField(default=False, db_index=True)
objects = EventManager()
class Meta:
ordering = ('-occurring_rule__dt_start',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('events:event_detail', kwargs={'calendar_slug': self.calendar.slug, 'pk': self.pk})
@cached_property
def previous_event(self):
if not self.next_time:
return None
dt = self.next_time.dt_end
try:
return Event.objects.until_datetime(dt).filter(calendar=self.calendar)[0]
except IndexError:
return None
@cached_property
def next_event(self):
if not self.next_time:
return None
dt = self.next_time.dt_start
try:
return Event.objects.for_datetime(dt).filter(calendar=self.calendar)[0]
except IndexError:
return None
@property
def next_time(self):
"""
Return the OccurringRule or RecurringRule with the closest `dt_start` from now.
"""
now = timezone.now()
recurring_start = occurring_start = None
try:
occurring_rule = self.occurring_rule
except OccurringRule.DoesNotExist:
pass
else:
if occurring_rule and occurring_rule.dt_start > now:
occurring_start = (occurring_rule.dt_start, occurring_rule)
rrules = self.recurring_rules.filter(finish__gt=now)
recurring_starts = [(rule.dt_start, rule) for rule in rrules if rule.dt_start is not None]
recurring_starts.sort(key=itemgetter(0))
try:
recurring_start = recurring_starts[0]
except IndexError:
pass
starts = [i for i in (recurring_start, occurring_start) if i is not None]
starts.sort(key=itemgetter(0))
try:
return starts[0][1]
except IndexError:
return None
@property
def previous_time(self):
now = timezone.now()
recurring_end = occurring_end = None
try:
occurring_rule = self.occurring_rule
except OccurringRule.DoesNotExist:
pass
else:
if occurring_rule and occurring_rule.dt_end < now:
occurring_end = (occurring_rule.dt_end, occurring_rule)
rrules = self.recurring_rules.filter(begin__lt=now)
recurring_ends = [(rule.dt_end, rule) for rule in rrules if rule.dt_end is not None]
recurring_ends.sort(key=itemgetter(0), reverse=True)
try:
recurring_end = recurring_ends[0]
except IndexError:
pass
ends = [i for i in (recurring_end, occurring_end) if i is not None]
ends.sort(key=itemgetter(0), reverse=True)
try:
return ends[0][1]
except IndexError:
return None
@property
def next_or_previous_time(self):
return self.next_time or self.previous_time
@property
def is_past(self):
return self.next_time is None
class RuleMixin:
def valid_dt_end(self):
return minutes_resolution(self.dt_end) > minutes_resolution(self.dt_start)
class OccurringRule(RuleMixin, models.Model):
"""
A single occurrence of an Event.
Shares the same API as `RecurringRule`.
"""
event = models.OneToOneField(Event, related_name='occurring_rule', on_delete=models.CASCADE)
dt_start = models.DateTimeField(default=timezone.now)
dt_end = models.DateTimeField(default=timezone.now)
all_day = models.BooleanField(default=False)
def __str__(self):
strftime = settings.SHORT_DATETIME_FORMAT
return '%s %s - %s' % (self.event.title, date(self.dt_start, strftime), date(self.dt_end, strftime))
@property
def begin(self):
return self.dt_start
@property
def finish(self):
return self.dt_end
@property
def duration(self):
return self.dt_end - self.dt_start
@property
def single_day(self):
return self.dt_start.date() == self.dt_end.date()
def duration_default():
return datetime.timedelta(minutes=15)
class RecurringRule(RuleMixin, models.Model):
"""
A repeating occurrence of an Event.
Shares the same API as `OccurringRule`.
"""
FREQ_CHOICES = (
(YEARLY, 'year(s)'),
(MONTHLY, 'month(s)'),
(WEEKLY, 'week(s)'),
(DAILY, 'day(s)'),
)
event = models.ForeignKey(Event, related_name='recurring_rules', on_delete=models.CASCADE)
begin = models.DateTimeField(default=timezone.now)
finish = models.DateTimeField(default=timezone.now)
duration_internal = models.DurationField(default=duration_default)
duration = models.CharField(max_length=50, default='15 min')
interval = models.PositiveSmallIntegerField(default=1)
frequency = models.PositiveSmallIntegerField(choices=FREQ_CHOICES, default=WEEKLY)
all_day = models.BooleanField(default=False)
def __str__(self):
strftime = settings.SHORT_DATETIME_FORMAT
return '%s every %s since %s' % (self.event.title, timedelta_nice_repr(self.freq_interval_as_timedelta), date(self.dt_start, strftime))
def to_rrule(self):
return rrule(
freq=self.frequency,
interval=self.interval,
dtstart=self.begin,
until=self.finish,
)
@property
def freq_interval_as_timedelta(self):
timedelta_frequencies = {
YEARLY: datetime.timedelta(days=365),
MONTHLY: datetime.timedelta(days=30),
WEEKLY: datetime.timedelta(days=7),
DAILY: datetime.timedelta(days=1),
}
return self.interval * timedelta_frequencies[self.frequency]
@property
def dt_start(self):
since = timezone.now()
recurrence = self.to_rrule().after(since)
if recurrence is None:
return since
return recurrence
@property
def dt_end(self):
return self.dt_start + self.duration_internal
@property
def single_day(self):
return self.dt_start.date() == self.dt_end.date()
def save(self, *args, **kwargs):
self.duration_internal = timedelta_parse(self.duration)
super().save(*args, **kwargs)
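# Illustrative sketch (not part of the original module): a RecurringRule with
# begin=2016-01-01, finish=2016-02-01, frequency=WEEKLY and interval=1 yields, via
# to_rrule(), the occurrences Jan 1, 8, 15, 22 and 29; dt_start then returns the first
# of those occurrences that falls after timezone.now(), or now itself if none remain.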
class Alarm(ContentManageable):
event = models.ForeignKey(Event, on_delete=models.CASCADE)
trigger = models.PositiveSmallIntegerField(_("hours before the event occurs"), default=24)
def __str__(self):
return 'Alarm for %s to %s' % (self.event.title, self.recipient)
@property
def recipient(self):
full_name = self.creator.get_full_name()
if full_name:
return "%s <%s>" % (full_name, self.creator.email)
return self.creator.email
|