code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
#!/usr/bin/env python3
import os
import docutils
import gzip
import docutils.core
import docutils.writers.manpage
from distutils.cmd import Command
from distutils.core import setup
from distutils.command.build import build
from subprocess import Popen, PIPE
class build_manpage(Command):
    """distutils command that renders doc/recur.rst into a gzipped man page.

    The output is written to build/doc/recur.1.gz so the data_files entry in
    setup() can install it under share/man/man1.
    """

    description = 'Generate and compress man page'
    user_options = []

    def initialize_options(self):
        # No options to initialize.
        pass

    def finalize_options(self):
        # No options to validate.
        pass

    def run(self):
        # makedirs with exist_ok avoids crashing when 'build' does not exist
        # yet (os.mkdir would raise FileNotFoundError) or when 'build/doc'
        # is left over from a previous run (FileExistsError).
        os.makedirs(os.path.join('build', 'doc'), exist_ok=True)
        with open(os.path.join('doc', 'recur.rst')) as srcfile:
            with gzip.open(os.path.join('build', 'doc', 'recur.1.gz'), 'w', 9) as manpage:
                # publish_string returns bytes, matching the binary-mode
                # gzip file handle.
                manpage_string = docutils.core.publish_string(
                    srcfile.read(),
                    writer=docutils.writers.manpage.Writer())
                manpage.write(manpage_string)
# Run our man page generation as part of the standard 'build' command.
build.sub_commands.append(('build_manpage', None))
# Derive the package version from the current git revision (short hash).
rev = Popen(["git","rev-parse","--short", "HEAD"], stdout=PIPE).communicate()[0].decode().rstrip()
# Package metadata. The version is derived from the current git checkout
# (see `rev` above), so building outside a git repository will not work.
setup(
    name='recur',
    version = 'git-{}'.format(rev),
    description = 'A to-do list manager that gamifies your recurring tasks',
    author = 'Tobias Frilling',
    author_email = 'tobias@frilling-online.de',
    url = 'http://www.github.com/ckafi/recur/',
    py_modules = ['recur'],
    scripts = ['scripts/recur'],
    # The man page is produced by the build_manpage command registered via
    # build.sub_commands above.
    data_files = [('share/man/man1', ['build/doc/recur.1.gz'])],
    cmdclass = {'build_manpage' : build_manpage },
    classifiers = [
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3',
        'Topic :: Office/Business'
    ]
)
|
ckafi/recur
|
setup.py
|
Python
|
mpl-2.0
| 1,794
|
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
class _Intersection(object):
def __init__(self, *tokens):
self._tokens = tokens
def matches(self, tokens):
for token in self._tokens:
if token not in tokens and (token + 's') not in tokens:
return False
return True
class _Area(object):
def __init__(self, name, tokens=None):
self._name = name
self._tokens = tokens if tokens else [self._name_to_token(name)]
def _name_to_token(self, word):
token = word.lower()
return token[:-1] if word[-1] == 's' else token
def matches(self, tokens):
# FIXME: Support pluraization properly
for token in self._tokens:
if isinstance(token, _Intersection):
if token.matches(tokens):
return True
elif token in tokens or (token + 's') in tokens:
return True
return False
def name(self):
return self._name
def tokens(self):
return self._tokens
# Table of known contribution areas. Each _Area maps a display name to the
# path/filename tokens (or token _Intersections) that identify it in a
# touched file path.
contribution_areas = [
    _Area('ARM JIT', ['arm']),
    # FIXME: 'Accelerated compositing / GPU acceleration'
    _Area('Accessibility'),
    _Area('Android port', ['android']),
    _Area('Animation', ['animation', 'animator']),
    _Area('Apple\'s Windows port', ['win', 'windows']), # FIXME: need to exclude chromium...
    _Area('Autotools Build', ['autotools']),
    _Area('Basic types and data structures (WTF)', ['wtf']),
    # FIXME: 'Bidirectional text'
    # FIXME: 'Build/test infrastructure (stuff under Tools/Scripts)'
    _Area('CMake Build', ['cmakelist']),
    _Area('CSS (Cascading Style Sheets)', ['css']),
    _Area('CSS Transforms', [_Intersection('css', 'transforms')]),
    _Area('CSS/SVG Filters', [_Intersection('css', 'filters'), _Intersection('svg', 'filters')]),
    _Area('CURL HTTP Backend', ['CURL']),
    _Area('Resource Cache', [_Intersection('loader', 'cache')]),
    _Area('Memory Cache', [_Intersection('graphics', 'cache')]),
    _Area('Cairo'),
    _Area('Canvas'),
    _Area('Chromium Linux', [_Intersection('chromium', 'linux')]),
    # FIXME: 'Commit Queue'
    _Area('Core DOM', ['dom']),
    _Area('Core Graphics', ['cg']),
    _Area('Bindings'),
    _Area('DOM Storage', ['storage']),
    _Area('Drag and Drop', ['drag']),
    _Area('DumpRenderTree'),
    _Area('EFL', ['efl']),
    _Area('Editing / Selection', ['editing']),
    _Area('Event Handling', ['event']),
    _Area('FastMalloc'),
    _Area('File API', ['fileapi']),
    _Area('Fonts'),
    _Area('Forms'),
    # FIXME: 'Frame Flattening'
    _Area('Frame Loader'),
    # FIXME: 'General' Maybe auto-detect people contributing to all subdirectories?
    _Area('Geolocation API', ['geolocation']),
    _Area('Graphics', ['graphics']),
    _Area('HTML', ['html']),
    _Area('HTML Parser', [_Intersection('html', 'parser')]), # FIXME: matches html/track/WebVTTParser.cpp
    _Area('HTML5 Media Support', ['media']),
    _Area('History', ['history']),
    # FIXME: 'Hit testing'
    _Area('Image Decoder', ['imagedecoder']),
    # FIXME: 'Input methods'
    _Area('JSC Bindings', [_Intersection('bindings', 'js')]),
    _Area('JavaScriptCore'),
    _Area('JavaScriptCore Regular Expressions', [_Intersection('JavaScriptCore', 'regexp')]),
    # FIXME: 'Layout tests' but what does it mean to say you're an expert on layout tests? Maybe worked on tools?
    _Area('Loader', ['loader']),
    _Area('MathML'),
    _Area('Memory Use / Leaks', ['leaks']), # Probably need more tokens
    _Area('MessagePorts'),
    _Area('Network', [_Intersection('platform', 'network')]),
    _Area('new-run-webkit-tests', ['layout_tests']),
    _Area('OpenVG graphics backend', ['openvg']),
    # FIXME: 'Performance'
    _Area('Plug-ins', ['plugins']),
    _Area('Printing', ['printing', 'print']),
    _Area('Rendering'),
    _Area('SVG (Scalable Vector Graphics)', ['svg']),
    _Area('Scrollbars', ['scroll']),
    _Area('Security'), # Probably need more tokens
    # FIXME: 'Shadow DOM'
    _Area('Skia'),
    _Area('Soup Network Backend', ['soup']),
    # FIXME: 'Spell Checking' just need tokens
    _Area('Tables', ['htmltable', 'rendertable']),
    # FIXME: 'Text Encoding'
    # FIXME: 'Text Layout'
    _Area('The Chromium Port', ['chromium']),
    _Area('The EFLWebKit Port', ['efl']),
    _Area('The WebKitGTK+ Port', ['gtk']),
    _Area('The Haiku Port', ['haiku']),
    _Area('The QtWebKit Port', ['qt']),
    _Area('The WinCE Port', ['wince']),
    _Area('The WinCairo Port', ['cairo']),
    _Area('The wxWebKit Port', ['wx']),
    _Area('Threading', ['thread']),
    _Area('Tools'),
    _Area('Touch Support', ['touch']),
    _Area('Transforms', ['transforms']), # There's also CSS transforms
    _Area('Transitions', ['transitions']), # This only matches transition events at the moment
    _Area('URL Parsing', ['KURL']), # Probably need more tokens
    _Area('V8', ['v8']),
    _Area('V8 Bindings', [_Intersection('bindings', 'v8')]),
    _Area('Web Inspector / Developer Tools', ['inspector']),
    _Area('Web Timing', ['PerformanceNavigation', 'PerformanceTiming']), # more tokens?
    _Area('WebCore Icon Database', ['icon']),
    _Area('WebGL', ['webgl']),
    _Area('WebKit Websites', ['websites']),
    _Area('WebKit2'),
    _Area('WebSQL Databases', [_Intersection('storage', 'database')]),
    _Area('WebSockets'),
    _Area('Workers'),
    _Area('XML'),
    _Area('XMLHttpRequest'),
    _Area('XSLT'),
    _Area('XSSAuditor'),
    _Area('WebKit API Tests', ['TestWebKitAPI']),
    _Area('webkit-patch', [_Intersection('webkitpy', 'commands')]),
]
class ContributionAreas(object):
    """Maps touched file paths to the contribution areas they belong to."""

    def __init__(self, filesystem, table=contribution_areas):
        """
        Args:
            filesystem: Object providing split(path) -> (head, tail).
            table: List of _Area instances to match against.
        """
        self._filesystem = filesystem
        self._contribution_areas = table

    def names(self):
        """Return the display names of all known areas."""
        return [area.name() for area in self._contribution_areas]

    def _split_path(self, path):
        """Split a path into its components, innermost component first."""
        result = []
        # `while path` covers both None and empty string; the explicit
        # len() check was redundant.
        while path:
            next_path, tail = self._filesystem.split(path)
            if path == next_path:
                # No progress (e.g. reached the root); stop.
                break
            if tail:
                result.append(tail)
            path = next_path
        return result

    def _split_camelcase(self, name, transform=lambda x: x):
        """Split a CamelCase identifier into its constituent words.

        Stops (returning whatever was collected) as soon as the remainder
        does not look like a CamelCase word.
        """
        # Raw string: the non-raw literal relied on Python leaving the
        # unrecognized '\.' escape alone, which raises a warning (and is an
        # error in future versions). Hoisting the compile also avoids a
        # per-iteration cache lookup.
        pattern = re.compile(r'^([A-Z][a-z0-9]+)|([A-Z0-9]+(?=([A-Z][a-z0-9]|\.|$)))')
        result = []
        while name:
            m = pattern.match(name)
            if not m:
                break
            result.append(transform(m.group(0)))
            name = name[m.end():]
        return result

    def areas_for_touched_files(self, touched_files):
        """Return the set of area names matching any of touched_files."""
        areas = set()
        for file_path in touched_files:
            split_file_path = self._split_path(file_path)
            # (Removed a dead `tokenized_file_path = None` assignment that
            # was immediately overwritten.)
            tokenized_file_path = sum(
                [self._split_camelcase(token, lambda x: x.lower())
                 for token in split_file_path], [])
            for area in self._contribution_areas:
                if area.matches(split_file_path) or area.matches(tokenized_file_path):
                    areas.add(area.name())
        return areas
|
espadrine/opera
|
chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/common/config/contributionareas.py
|
Python
|
bsd-3-clause
| 8,571
|
# -*- coding: utf-8 -*-
"""This module tests various ways how to set up the provisioning using the provisioning dialog."""
import re
from datetime import datetime, timedelta
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.common.provider import cleanup_vm
from cfme.infrastructure.virtual_machines import Vm
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.provisioning import provisioning_form
from cfme.services import requests
from cfme.web_ui import InfoBlock, fill, flash
from utils import testgen
from utils.appliance.implementations.ui import navigate_to
from utils.blockers import BZ
from utils.generators import random_vm_name
from utils.log import logger
from utils.version import current_version
from utils.wait import wait_for, TimedOutError
# Module-level test configuration: these marks apply to every test below.
pytestmark = [
    pytest.mark.meta(server_roles="+automate"),
    pytest.mark.usefixtures('uses_infra_providers'),
    pytest.mark.long_running,
    test_requirements.provision,
    pytest.mark.meta(blockers=[
        BZ(
            1265466,
            # BZ 1265466 only blocks RHEV providers; unblock all others.
            unblock=lambda provider: not provider.one_of(RHEVMProvider))
    ]),
    pytest.mark.tier(3)
]
# Parametrize every test over infra providers that define all of the
# provisioning fields we rely on (template, host, datastore).
pytest_generate_tests = testgen.generate([InfraProvider], required_fields=[
    ['provisioning', 'template'],
    ['provisioning', 'host'],
    ['provisioning', 'datastore']
], scope="module")
@pytest.fixture(scope="function")
def vm_name():
    """Provide a fresh, randomized VM name for each test invocation."""
    return random_vm_name('provd')
@pytest.fixture(scope="function")
def prov_data(provisioning, provider):
    """Build the baseline provisioning-form data for the current provider."""
    rand = fauxfactory.gen_alphanumeric
    data = {
        "first_name": rand(),
        "last_name": rand(),
        "email": "{}@{}.test".format(rand(), rand()),
        "manager_name": "{} {}".format(rand(), rand()),
        "vlan": provisioning.get("vlan", None),
        # "datastore_create": False,
        "datastore_name": {"name": provisioning["datastore"]},
        "host_name": {"name": provisioning["host"]},
        # "catalog_name": provisioning["catalog_item_type"],
    }
    # Pick the provision type appropriate for the provider; for any other
    # provider type the dialog's default is left alone.
    provision_types = {
        'rhevm': 'Native Clone',
        'virtualcenter': 'VMware',
    }
    if provider.type in provision_types:
        data['provision_type'] = provision_types[provider.type]
    return data
@pytest.fixture(scope="function")
def provisioner(request, setup_provider, provider, vm_name):
    """Return a helper that submits the provision dialog and waits for the VM.

    The returned callable fills the provisioning form with the given data,
    optionally verifies a scheduled provision does NOT start before
    ``delayed``, then waits for the VM to appear on the provider and for
    the CFME request to finish successfully.
    """
    def _provisioner(template, provisioning_data, delayed=None):
        vm = Vm(name=vm_name, provider=provider, template_name=template)
        navigate_to(vm, 'ProvisionVM')
        fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
        flash.assert_no_errors()
        # Register cleanup immediately so the VM is removed even if a later
        # wait fails.
        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        if delayed is not None:
            # A scheduled provision must NOT complete before its scheduled
            # time; waiting for the request up to that moment is therefore
            # expected to time out.
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
            cells = {'Description': row_description}
            try:
                row, __ = wait_for(requests.wait_for_request, [cells],
                                   fail_func=requests.reload, num_sec=total_seconds, delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass
        logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
        wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)
        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s', vm_name)
        row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
        cells = {'Description': row_description}
        row, __ = wait_for(requests.wait_for_request, [cells],
                           fail_func=requests.reload, num_sec=900, delay=20)
        assert 'Successfully' in row.last_message.text and row.status.text != 'Error'
        return vm
    return _provisioner
def test_change_cpu_ram(provisioner, soft_assert, provider, prov_data, vm_name):
    """ Tests change RAM and CPU in provisioning dialog.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set number of CPUs and amount of RAM.
        * Submit the provisioning request and wait for it to finish.
        * Visit the page of the provisioned VM. The summary should state correct values for CPU&RAM.

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    # SCVMM on CFME 5.6 takes a flat CPU count; other combinations use
    # sockets x cores-per-socket.
    if provider.type == "scvmm" and current_version() == "5.6":
        prov_data["num_cpus"] = "4"
    else:
        prov_data["num_sockets"] = "4"
        prov_data["cores_per_socket"] = "1" if provider.type != "scvmm" else None
    prov_data["memory"] = "4096"
    template_name = provider.data['provisioning']['template']
    vm = provisioner(template_name, prov_data)
    # Go to the VM info
    data = vm.get_detail(properties=("Properties", "Container")).strip()
    # No longer possible to use version pick because of cherrypicking?
    # The CPU/RAM summary string format varies, so try each known pattern
    # and use the first that matches.
    regexes = map(re.compile, [
        r"^[^(]*(\d+) CPUs?.*, ([^)]+)[^)]*$",
        r"^[^(]*\((\d+) CPUs?, ([^)]+)\)[^)]*$",
        r"^.*?(\d+) CPUs? .*?(\d+ MB)$"])
    for regex in regexes:
        match = regex.match(data)
        if match is not None:
            num_cpus, memory = match.groups()
            break
    else:
        raise ValueError("Could not parse string {}".format(repr(data)))
    soft_assert(num_cpus == "4", "num_cpus should be {}, is {}".format("4", num_cpus))
    soft_assert(memory == "4096 MB", "memory should be {}, is {}".format("4096 MB", memory))
# Special parametrization in testgen above
@pytest.mark.meta(blockers=[1209847, 1380782])
@pytest.mark.parametrize("disk_format", ["thin", "thick", "preallocated"])
@pytest.mark.uncollectif(lambda provider, disk_format:
    # Skip provider/format combinations that do not exist in the dialog:
    # RHEV offers "preallocated" instead of "thick" and vice versa.
    (provider.type == "rhevm" and disk_format == "thick") or
    (provider.type != "rhevm" and disk_format == "preallocated") or
    # Temporarily, our storage domain cannot handle preallocated disks
    (provider.type == "rhevm" and disk_format == "preallocated") or
    # NOTE(review): scvmm appears to have no disk-format choice at all —
    # confirm against the provisioning dialog.
    (provider.type == "scvmm") or
    (provider.key == "vsphere55" and disk_format == "thick"))
def test_disk_format_select(provisioner, disk_format, provider, prov_data, vm_name):
    """ Tests disk format selection in provisioning dialog.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set the disk format to be thick or thin.
        * Submit the provisioning request and wait for it to finish.
        * Visit the page of the provisioned VM.
        * The ``Thin Provisioning Used`` field should state true of false according to the selection

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    prov_data["disk_format"] = disk_format
    template_name = provider.data['provisioning']['template']
    vm = provisioner(template_name, prov_data)
    # Go to the VM info
    vm.load_details(refresh=True)
    # The summary reports thin-provisioning as the string "true"/"false".
    thin = InfoBlock.text(
        "Datastore Allocation Summary", "Thin Provisioning Used").strip().lower() == "true"
    if disk_format == "thin":
        assert thin, "The disk format should be Thin"
    else:
        assert not thin, "The disk format should not be Thin"
@pytest.mark.parametrize("started", [True, False])
def test_power_on_or_off_after_provision(provisioner, prov_data, provider, started, vm_name):
    """ Tests setting the desired power state after provisioning.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set whether you want or not the VM to be
            powered on after provisioning.
        * Submit the provisioning request and wait for it to finish.
        * The VM should become steady in the desired VM power state.

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    prov_data["power_on"] = started
    template_name = provider.data['provisioning']['template']
    provisioner(template_name, prov_data)
    # The VM must both exist on the provider AND settle into the requested
    # power state (running when started=True, stopped otherwise).
    wait_for(
        lambda: provider.mgmt.does_vm_exist(vm_name) and
        (provider.mgmt.is_vm_running if started else provider.mgmt.is_vm_stopped)(vm_name),
        num_sec=240, delay=5
    )
def test_tag(provisioner, prov_data, provider, vm_name):
    """ Tests tagging VMs using provisioning dialogs.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, pick a tag.
        * Submit the provisioning request and wait for it to finish.
        * Visit th page of VM, it should display the selected tags

    Metadata:
        test_flag: provision
    """
    prov_data["vm_name"] = vm_name
    # NOTE(review): the trailing True appears to mean "select this tag" —
    # confirm against the fill() implementation for apply_tags.
    prov_data["apply_tags"] = [(["Service Level *", "Gold"], True)]
    template_name = provider.data['provisioning']['template']
    vm = provisioner(template_name, prov_data)
    tags = vm.get_tags()
    assert any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
               for tag in tags), "Service Level: Gold not in tags ({})".format(str(tags))
@pytest.mark.meta(blockers=[1204115])
def test_provisioning_schedule(provisioner, provider, prov_data, vm_name):
    """ Tests provision scheduling.

    Prerequisities:
        * A provider set up, supporting provisioning in CFME

    Steps:
        * Open the provisioning dialog.
        * Apart from the usual provisioning settings, set a scheduled provision and pick a time.
        * Submit the provisioning request, it should not start before the scheduled time.

    Metadata:
        test_flag: provision
    """
    now = datetime.utcnow()
    prov_data["vm_name"] = vm_name
    prov_data["schedule_type"] = "schedule"
    prov_data["provision_date"] = now.strftime("%m/%d/%Y")
    # Round up to the next STEP-minute boundary — presumably matching the
    # minute choices offered by the dialog (TODO confirm).
    STEP = 5
    minutes_diff = (STEP - (now.minute % STEP))
    # To have some gap for automation
    if minutes_diff <= 3:
        minutes_diff += 5
    provision_time = timedelta(minutes=minutes_diff) + now
    prov_data["provision_start_hour"] = str(provision_time.hour)
    prov_data["provision_start_min"] = str(provision_time.minute)
    template_name = provider.data['provisioning']['template']
    # Passing `delayed` makes the provisioner fixture verify the request
    # does not complete before the scheduled time.
    provisioner(template_name, prov_data, delayed=provision_time)
|
rlbabyuk/integration_tests
|
cfme/tests/infrastructure/test_provisioning_dialog.py
|
Python
|
gpl-2.0
| 10,935
|
# Adapted from
# https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/tensor.py
# and
# https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/arrow_conversion.py
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications:
# - Added ArrowTensorType.to_pandas_type()
# - Added ArrowTensorArray.__getitem__()
# - Added ArrowTensorArray.__iter__()
# - Added support for column casts to extension types.
# - Fleshed out docstrings and examples.
# - Fixed TensorArray.isna() so it returns an appropriate ExtensionArray.
# - Added different (more vectorized) TensorArray.take() operation.
# - Added support for more reducers (agg funcs) to TensorArray.
# - Added support for logical operators to TensorArray(Element).
# - Miscellaneous small bug fixes and optimizations.
# The collections-module alias for the ABCs was deprecated in Python 3.3
# and removed in 3.10; import from collections.abc instead.
from collections.abc import Iterable
import numbers
from typing import Sequence, Any, Union, Tuple, Optional, Callable
import numpy as np
import pandas as pd
from pandas._typing import Dtype
from pandas.compat import set_function_name
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
try:
from pandas.core.dtypes.generic import ABCIndex
except ImportError:
# ABCIndexClass changed to ABCIndex in Pandas 1.3
from pandas.core.dtypes.generic import ABCIndexClass as ABCIndex
from pandas.core.indexers import check_array_indexer, validate_indices
import pyarrow as pa
from ray.util.annotations import PublicAPI
# -----------------------------------------------------------------------------
# Pandas extension type and array
# -----------------------------------------------------------------------------
@PublicAPI(stability="beta")
@pd.api.extensions.register_extension_dtype
class TensorDtype(pd.api.extensions.ExtensionDtype):
    """
    Pandas extension dtype for columns of fixed-shape, homogeneous-typed
    tensors.

    Scalars of this dtype are ``TensorArrayElement`` instances and the
    backing array type is ``TensorArray``. Casting an object column of
    equally-shaped ndarrays to ``TensorDtype()`` produces a tensor column
    that supports the usual DataFrame operations and round-trips through
    the corresponding Arrow tensor extension type (and hence Parquet).

    See
    https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/base.py
    for the up-to-date ExtensionDtype interface and subclassing contract.
    """

    # NOTE(Clark): This is apparently required to prevent integer indexing
    # errors, but is an undocumented ExtensionDtype attribute. See issue:
    # https://github.com/CODAIT/text-extensions-for-pandas/issues/166
    base = None

    @property
    def type(self):
        """
        The scalar type for the array: ``ExtensionArray[item]`` is expected
        to return an instance of this type for non-NA scalar items.
        """
        return TensorArrayElement

    @property
    def name(self) -> str:
        """String identifying the dtype, shown in e.g. ``Series.dtype``."""
        return "TensorDtype"

    @classmethod
    def construct_from_string(cls, string: str):
        """
        Construct this dtype from its string name.

        Parameters
        ----------
        string : str
            The name of the type; must equal ``cls.__name__``.

        Returns
        -------
        ExtensionDtype
            Instance of the dtype.

        Raises
        ------
        TypeError
            If ``string`` is not a str or does not name this dtype.
            Upstream pandas uses this TypeError as normal control flow and
            probes with bogus names, so it must not be relaxed.
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        if string != cls.__name__:
            raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
        return cls()

    @classmethod
    def construct_array_type(cls):
        """Return the ExtensionArray subclass associated with this dtype."""
        return TensorArray

    def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
        """
        Convert a pyarrow (chunked) array to a TensorArray.

        Together with ``TensorArray.__arrow_array__`` this implements the
        Pandas extension type/array <--> Arrow extension type/array
        interoperability protocol. See
        https://pandas.pydata.org/pandas-docs/stable/development/extending.html#compatibility-with-apache-arrow
        for more information.
        """
        if not isinstance(array, pa.ChunkedArray):
            values = array.to_numpy()
        elif array.num_chunks > 1:
            # TODO(Clark): Remove concat and construct from list with
            # shape.
            values = np.concatenate(
                [chunk.to_numpy() for chunk in array.iterchunks()]
            )
        else:
            values = array.chunk(0).to_numpy()
        return TensorArray(values)
class TensorOpsMixin(pd.api.extensions.ExtensionScalarOpsMixin):
    """
    Mixin for TensorArray operator support, applying operations on the
    underlying ndarrays.
    """

    @classmethod
    def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None):
        """
        Add support for binary operators by unwrapping, applying, and
        rewrapping.

        Returns the wrapped binary-operator function, named after ``op``.
        """
        # NOTE(Clark): This overrides, but coerce_to_dtype, result_dtype might
        # not be needed

        def _binop(self, other):
            lvalues = self._tensor
            if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndex)):
                # Rely on Pandas to unbox and dispatch to us.
                return NotImplemented
            # divmod returns a tuple
            if op_name in ["__divmod__", "__rdivmod__"]:
                # TODO(Clark): Add support for divmod and rdivmod.
                # div, mod = result
                raise NotImplementedError
            if isinstance(other, (TensorArray, TensorArrayElement)):
                rvalues = other._tensor
            else:
                rvalues = other
            result = op(lvalues, rvalues)
            # Force a TensorArray if rvalue is not a scalar.
            if isinstance(self, TensorArrayElement) and (
                not isinstance(other, TensorArrayElement) or not np.isscalar(other)
            ):
                result_wrapped = TensorArray(result)
            else:
                result_wrapped = cls(result)
            return result_wrapped

        # NOTE: _binop reads op_name via closure; it is bound here, after
        # the def but before _binop can ever be invoked.
        op_name = f"__{op.__name__}__"
        return set_function_name(_binop, op_name, cls)

    @classmethod
    def _create_logical_method(cls, op):
        # Logical operators reuse the same elementwise wrapping as the
        # arithmetic/comparison operators.
        return cls._create_method(op)
class TensorArrayElement(TensorOpsMixin):
    """
    Single element of a TensorArray: a thin wrapper around one ndarray.
    """

    def __init__(self, values: np.ndarray):
        """
        Wrap a NumPy ndarray as a single TensorArray element.

        Args:
            values: ndarray that underlies this TensorArray element.
        """
        self._tensor = values

    def __repr__(self):
        return repr(self._tensor)

    def __str__(self):
        return str(self._tensor)

    def to_numpy(self):
        """Return the values of this element as a NumPy ndarray."""
        return np.asarray(self._tensor)

    def __array__(self):
        # Lets np.asarray() and other NumPy entry points treat this
        # element directly as an ndarray.
        return np.asarray(self._tensor)
@PublicAPI(stability="beta")
class TensorArray(pd.api.extensions.ExtensionArray, TensorOpsMixin):
"""
Pandas `ExtensionArray` representing a tensor column, i.e. a column
consisting of ndarrays as elements. All tensors in a column must have the
same shape.
Examples:
>>> # Create a DataFrame with a list of ndarrays as a column.
>>> df = pd.DataFrame({
"one": [1, 2, 3],
"two": TensorArray(np.arange(24).reshape((3, 2, 2, 2)))})
>>> # Note that the column dtype is TensorDtype.
>>> df.dtypes
one int64
two TensorDtype
dtype: object
>>> # Pandas is aware of this tensor column, and we can do the
>>> # typical DataFrame operations on this column.
>>> col = 2 * (df["two"] + 10)
>>> # The ndarrays underlying the tensor column will be manipulated,
>>> # but the column itself will continue to be a Pandas type.
>>> type(col)
pandas.core.series.Series
>>> col
0 [[[ 2 4]
[ 6 8]]
[[10 12]
[14 16]]]
1 [[[18 20]
[22 24]]
[[26 28]
[30 32]]]
2 [[[34 36]
[38 40]]
[[42 44]
[46 48]]]
Name: two, dtype: TensorDtype
>>> # Once you do an aggregation on that column that returns a single
>>> # row's value, you get back our TensorArrayElement type.
>>> tensor = col.mean()
>>> type(tensor)
ray.data.extensions.tensor_extension.TensorArrayElement
>>> tensor
array([[[18., 20.],
[22., 24.]],
[[26., 28.],
[30., 32.]]])
>>> # This is a light wrapper around a NumPy ndarray, and can easily
>>> # be converted to an ndarray.
>>> type(tensor.to_numpy())
numpy.ndarray
>>> # In addition to doing Pandas operations on the tensor column,
>>> # you can now put the DataFrame into a Dataset.
>>> ds = ray.data.from_pandas(df)
>>> # Internally, this column is represented the corresponding
>>> # Arrow tensor extension type.
>>> ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> # You can write the dataset to Parquet.
>>> ds.write_parquet("/some/path")
>>> # And you can read it back.
>>> read_ds = ray.data.read_parquet("/some/path")
>>> read_ds.schema()
one: int64
two: extension<arrow.py_extension_type<ArrowTensorType>>
>>> read_df = ray.get(read_ds.to_pandas_refs())[0]
>>> read_df.dtypes
one int64
two TensorDtype
dtype: object
>>> # The tensor extension type is preserved along the
>>> # Pandas --> Arrow --> Parquet --> Arrow --> Pandas
>>> # conversion chain.
>>> read_df.equals(df)
True
"""
SUPPORTED_REDUCERS = {
"sum": np.sum,
"all": np.all,
"any": np.any,
"min": np.min,
"max": np.max,
"mean": np.mean,
"median": np.median,
"prod": np.prod,
"std": np.std,
"var": np.var,
}
# See https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/base.py # noqa
# for interface documentation and the subclassing contract.
def __init__(
self,
values: Union[
np.ndarray,
ABCSeries,
Sequence[Union[np.ndarray, TensorArrayElement]],
TensorArrayElement,
Any,
],
):
"""
Args:
values: A NumPy ndarray or sequence of NumPy ndarrays of equal
shape.
"""
if isinstance(values, ABCSeries):
# Convert series to ndarray and passthrough to ndarray handling
# logic.
values = values.to_numpy()
if isinstance(values, np.ndarray):
if (
values.dtype.type is np.object_
and len(values) > 0
and isinstance(values[0], (np.ndarray, TensorArrayElement))
):
# Convert ndarrays of ndarrays/TensorArrayElements
# with an opaque object type to a properly typed ndarray of
# ndarrays.
self._tensor = np.array([np.asarray(v) for v in values])
else:
self._tensor = values
elif isinstance(values, Sequence):
if len(values) == 0:
self._tensor = np.array([])
else:
self._tensor = np.stack([np.asarray(v) for v in values], axis=0)
elif isinstance(values, TensorArrayElement):
self._tensor = np.array([np.asarray(values)])
elif np.isscalar(values):
# `values` is a single element:
self._tensor = np.array([values])
elif isinstance(values, TensorArray):
raise TypeError("Use the copy() method to create a copy of a TensorArray")
else:
raise TypeError(
"Expected a numpy.ndarray or sequence of numpy.ndarray, "
f"but received {values} "
f"of type '{type(values)}' instead."
)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
):
"""
Construct a new ExtensionArray from a sequence of scalars.
Parameters
----------
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type`` or be converted into this type in this
method.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : bool, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
if copy and isinstance(scalars, np.ndarray):
scalars = scalars.copy()
elif isinstance(scalars, TensorArray):
scalars = scalars._tensor.copy() if copy else scalars._tensor
return TensorArray(scalars)
@classmethod
def _from_factorized(
cls, values: np.ndarray, original: pd.api.extensions.ExtensionArray
):
"""
Reconstruct an ExtensionArray after factorization.
Parameters
----------
values : ndarray
An integer ndarray with the factorized values.
original : ExtensionArray
The original ExtensionArray that factorize was called on.
See Also
--------
factorize : Top-level factorize method that dispatches here.
ExtensionArray.factorize : Encode the extension array as an enumerated
type.
"""
raise NotImplementedError
def __getitem__(
self, item: Union[int, slice, np.ndarray]
) -> Union["TensorArray", "TensorArrayElement"]:
"""
Select a subset of self.
Parameters
----------
item : int, slice, or ndarray
* int: The position in 'self' to get.
* slice: A slice object, where 'start', 'stop', and 'step' are
integers or None
* ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
Returns
-------
item : scalar or ExtensionArray
Notes
-----
For scalar ``item``, return a scalar value suitable for the array's
type. This should be an instance of ``self.dtype.type``.
For slice ``key``, return an instance of ``ExtensionArray``, even
if the slice is length 0 or 1.
For a boolean mask, return an instance of ``ExtensionArray``, filtered
to the values where ``item`` is True.
"""
# Return scalar if single value is selected, a TensorArrayElement for
# single array element, or TensorArray for slice.
if isinstance(item, int):
value = self._tensor[item]
if np.isscalar(value):
return value
else:
return TensorArrayElement(value)
else:
# BEGIN workaround for Pandas issue #42430
if isinstance(item, tuple) and len(item) > 1 and item[0] == Ellipsis:
if len(item) > 2:
# Hopefully this case is not possible, but can't be sure
raise ValueError(
"Workaround Pandas issue #42430 not "
"implemented for tuple length > 2"
)
item = item[1]
# END workaround for issue #42430
if isinstance(item, TensorArray):
item = np.asarray(item)
item = check_array_indexer(self, item)
return TensorArray(self._tensor[item])
def __len__(self) -> int:
"""
Length of this array.
Returns
-------
length : int
"""
return len(self._tensor)
@property
def dtype(self) -> pd.api.extensions.ExtensionDtype:
"""
An instance of 'ExtensionDtype'.
"""
return TensorDtype()
@property
def nbytes(self) -> int:
"""
The number of bytes needed to store this object in memory.
"""
return self._tensor.nbytes
def isna(self) -> "TensorArray":
"""
A 1-D array indicating if each value is missing.
Returns
-------
na_values : Union[np.ndarray, ExtensionArray]
In most cases, this should return a NumPy ndarray. For
exceptional cases like ``SparseArray``, where returning
an ndarray would be expensive, an ExtensionArray may be
returned.
Notes
-----
If returning an ExtensionArray, then
* ``na_values._is_boolean`` should be True
* `na_values` should implement :func:`ExtensionArray._reduce`
* ``na_values.any`` and ``na_values.all`` should be implemented
"""
if self._tensor.dtype.type is np.object_:
# Avoid comparing with __eq__ because the elements of the tensor
# may do something funny with that operation.
result_list = [self._tensor[i] is None for i in range(len(self))]
result = np.broadcast_to(
np.array(result_list, dtype=np.bool), self.numpy_shape
)
elif self._tensor.dtype.type is np.str_:
result = self._tensor == ""
else:
result = np.isnan(self._tensor)
return TensorArray(result)
def take(
self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None
) -> "TensorArray":
"""
Take elements from an array.
Parameters
----------
indices : sequence of int
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
Returns
-------
ExtensionArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
See Also
--------
numpy.take : Take elements from an array along an axis.
api.extensions.take : Take elements from an array.
Notes
-----
ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
``iloc``, when `indices` is a sequence of values. Additionally,
it's called by :meth:`Series.reindex`, or any other method
that causes realignment, with a `fill_value`.
Examples
--------
Here's an example implementation, which relies on casting the
extension array to object dtype. This uses the helper method
:func:`pandas.api.extensions.take`.
.. code-block:: python
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
# If the ExtensionArray is backed by an ndarray, then
# just pass that here instead of coercing to object.
data = self.astype(object)
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
# fill value should always be translated from the scalar
# type for the array, to the physical storage type for
# the data, before passing to take.
result = take(data, indices, fill_value=fill_value,
allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
"""
if allow_fill:
# With allow_fill being True, negative values in `indices` indicate
# missing values and should be set to `fill_value`.
indices = np.asarray(indices, dtype=np.intp)
validate_indices(indices, len(self._tensor))
# Check if there are missing indices to fill, otherwise we can
# delegate to NumPy ndarray .take().
has_missing = np.any(indices < 0)
if has_missing:
if fill_value is None:
fill_value = np.nan
# Create an array populated with fill value.
values = np.full((len(indices),) + self._tensor.shape[1:], fill_value)
# Put tensors at the given positive indices into array.
is_nonneg = indices >= 0
np.put(values, np.where(is_nonneg)[0], self._tensor[indices[is_nonneg]])
return TensorArray(values)
# Delegate take to NumPy array.
values = self._tensor.take(indices, axis=0)
return TensorArray(values)
def copy(self) -> "TensorArray":
"""
Return a copy of the array.
Returns
-------
ExtensionArray
"""
# TODO(Clark): Copy cached properties.
return TensorArray(self._tensor.copy())
@classmethod
def _concat_same_type(cls, to_concat: Sequence["TensorArray"]) -> "TensorArray":
"""
Concatenate multiple array of this dtype.
Parameters
----------
to_concat : sequence of this type
Returns
-------
ExtensionArray
"""
return TensorArray(np.concatenate([a._tensor for a in to_concat]))
def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
"""
Set one or more values inplace.
This method is not required to satisfy the pandas extension array
interface.
Parameters
----------
key : int, ndarray, or slice
When called from, e.g. ``Series.__setitem__``, ``key`` will be
one of
* scalar int
* ndarray of integers.
* boolean ndarray
* slice object
value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
value or values to be set of ``key``.
Returns
-------
None
"""
key = check_array_indexer(self, key)
if isinstance(value, TensorArrayElement) or np.isscalar(value):
value = np.asarray(value)
if isinstance(value, list):
value = [
np.asarray(v) if isinstance(v, TensorArrayElement) else v for v in value
]
if isinstance(value, ABCSeries) and isinstance(value.dtype, TensorDtype):
value = value.values
if value is None or isinstance(value, Sequence) and len(value) == 0:
self._tensor[key] = np.full_like(self._tensor[key], np.nan)
elif isinstance(key, (int, slice, np.ndarray)):
self._tensor[key] = value
else:
raise NotImplementedError(
f"__setitem__ with key type '{type(key)}' not implemented"
)
def __contains__(self, item) -> bool:
"""
Return for `item in self`.
"""
if isinstance(item, TensorArrayElement):
np_item = np.asarray(item)
if np_item.size == 1 and np.isnan(np_item).all():
return self.isna().any()
return super().__contains__(item)
    def __repr__(self):
        # Delegate to the backing ndarray's repr.
        return self._tensor.__repr__()

    def __str__(self):
        # Delegate to the backing ndarray's str.
        return self._tensor.__str__()

    def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
        # Factorization of tensor elements is unsupported.
        # TODO(Clark): return self._tensor, np.nan
        raise NotImplementedError
def _reduce(self, name: str, skipna: bool = True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
supported_kwargs = ["ddof"]
reducer_kwargs = {}
for kw in supported_kwargs:
try:
reducer_kwargs[kw] = kwargs[kw]
except KeyError:
pass
try:
return TensorArrayElement(
self.SUPPORTED_REDUCERS[name](self._tensor, axis=0, **reducer_kwargs)
)
except KeyError:
raise NotImplementedError(f"'{name}' aggregate not implemented.") from None
    def __array__(self, dtype: np.dtype = None):
        # NumPy protocol hook: expose the backing ndarray (optionally cast).
        return np.asarray(self._tensor, dtype=dtype)
def __array_ufunc__(self, ufunc: Callable, method: str, *inputs, **kwargs):
"""
Supports NumPy ufuncs without requiring sloppy coercion to an
ndarray.
"""
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, (TensorArray, np.ndarray, numbers.Number)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._tensor if isinstance(x, TensorArray) else x for x in inputs)
if out:
kwargs["out"] = tuple(
x._tensor if isinstance(x, TensorArray) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# Multiple return values.
return tuple(type(self)(x) for x in result)
elif method == "at":
# No return value.
return None
else:
# One return value.
return type(self)(result)
def to_numpy(
self,
dtype: np.dtype = None,
copy: bool = False,
na_value: Any = pd.api.extensions.no_default,
):
"""
Convert to a NumPy ndarray.
.. versionadded:: 1.0.0
This is similar to :meth:`numpy.asarray`, but may provide additional
control over how the conversion is done.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
Returns
-------
numpy.ndarray
"""
if dtype is not None:
dtype = pd.api.types.pandas_dtype(dtype)
if copy:
values = np.array(self._tensor, dtype=dtype, copy=True)
else:
values = self._tensor.astype(dtype)
elif copy:
values = self._tensor.copy()
else:
values = self._tensor
return values
    # NOTE: the properties below describe the full backing ndarray; its first
    # axis is the row axis of the column, so each element tensor has one
    # fewer dimension than ``numpy_ndim``.
    @property
    def numpy_dtype(self):
        """
        Get the dtype of the tensor.
        :return: The numpy dtype of the backing ndarray
        """
        return self._tensor.dtype

    @property
    def numpy_ndim(self):
        """
        Get the number of tensor dimensions.
        :return: integer for the number of dimensions
        """
        return self._tensor.ndim

    @property
    def numpy_shape(self):
        """
        Get the shape of the tensor.
        :return: A tuple of integers for the numpy shape of the backing ndarray
        """
        return self._tensor.shape
@property
def _is_boolean(self):
"""
Whether this extension array should be considered boolean.
By default, ExtensionArrays are assumed to be non-numeric.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
# This is needed to support returning a TensorArray from .isnan().
# TODO(Clark): Propagate tensor dtype to extension TensorDtype and
# move this property there.
return np.issubdtype(self._tensor.dtype, np.bool)
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
dtype = pd.api.types.pandas_dtype(dtype)
if isinstance(dtype, TensorDtype):
values = TensorArray(self._tensor.copy()) if copy else self
elif not (
pd.api.types.is_object_dtype(dtype) and pd.api.types.is_string_dtype(dtype)
):
values = np.array([str(t) for t in self._tensor])
if isinstance(dtype, pd.StringDtype):
return dtype.construct_array_type()._from_sequence(values, copy=False)
else:
return values
elif pd.api.types.is_object_dtype(dtype):
# Interpret astype(object) as "cast to an array of numpy arrays"
values = np.empty(len(self), dtype=object)
for i in range(len(self)):
values[i] = self._tensor[i]
else:
values = self._tensor.astype(dtype, copy=copy)
return values
def any(self, axis=None, out=None, keepdims=False):
"""
Test whether any array element along a given axis evaluates to True.
See numpy.any() documentation for more information
https://numpy.org/doc/stable/reference/generated/numpy.any.html#numpy.any
:param axis: Axis or axes along which a logical OR reduction is
performed.
:param out: Alternate output array in which to place the result.
:param keepdims: If this is set to True, the axes which are reduced are
left in the result as dimensions with size one.
:return: single boolean unless axis is not None else TensorArray
"""
result = self._tensor.any(axis=axis, out=out, keepdims=keepdims)
return result if axis is None else TensorArray(result)
def all(self, axis=None, out=None, keepdims=False):
"""
Test whether all array elements along a given axis evaluate to True.
:param axis: Axis or axes along which a logical AND reduction is
performed.
:param out: Alternate output array in which to place the result.
:param keepdims: If this is set to True, the axes which are reduced are
left in the result as dimensions with size one.
:return: single boolean unless axis is not None else TensorArray
"""
result = self._tensor.all(axis=axis, out=out, keepdims=keepdims)
return result if axis is None else TensorArray(result)
def __arrow_array__(self, type=None):
"""
Convert this TensorArray to an ArrowTensorArray extension array.
This and TensorDtype.__from_arrow__ make up the
Pandas extension type + array <--> Arrow extension type + array
interoperability protocol. See
https://pandas.pydata.org/pandas-docs/stable/development/extending.html#compatibility-with-apache-arrow
for more information.
"""
return ArrowTensorArray.from_numpy(self._tensor)
# Add operators from the mixin to the TensorArrayElement and TensorArray
# classes. NOTE(review): _add_arithmetic_ops/_add_comparison_ops appear to be
# the pandas ExtensionOpsMixin hooks that generate the +, -, ==, <, ... dunder
# methods; _add_logical_ops presumably comes from a mixin defined earlier in
# this file — confirm against the mixin definition above.
TensorArrayElement._add_arithmetic_ops()
TensorArrayElement._add_comparison_ops()
TensorArrayElement._add_logical_ops()
TensorArray._add_arithmetic_ops()
TensorArray._add_comparison_ops()
TensorArray._add_logical_ops()
# -----------------------------------------------------------------------------
# Arrow extension type and array
# -----------------------------------------------------------------------------
@PublicAPI(stability="beta")
class ArrowTensorType(pa.PyExtensionType):
    """
    Arrow ExtensionType for arrays of fixed-shape, homogeneous-typed tensors.

    This is the Arrow-side counterpart of TensorDtype.

    See Arrow extension type docs:
    https://arrow.apache.org/docs/python/extending_types.html#defining-extension-types-user-defined-types
    """

    def __init__(self, shape: Tuple[int, ...], dtype: pa.DataType):
        """
        Construct the Arrow extension type for an array of fixed-shape
        tensors.

        Args:
            shape: Shape of contained tensors.
            dtype: pyarrow dtype of tensor elements.
        """
        self._shape = shape
        # Storage is a variable-sized list array of the element type; the
        # fixed per-element shape is carried on this extension type.
        super().__init__(pa.list_(dtype))

    @property
    def shape(self):
        """Shape of contained tensors."""
        return self._shape

    def to_pandas_dtype(self):
        """
        Convert this Arrow extension type to the corresponding Pandas dtype.

        Returns:
            An instance of pd.api.extensions.ExtensionDtype.
        """
        return TensorDtype()

    def __reduce__(self):
        # Support pickling by reconstructing from shape + element value type.
        return ArrowTensorType, (self._shape, self.storage_type.value_type)

    def __arrow_ext_class__(self):
        """
        ExtensionArray subclass with custom logic for this tensor type.

        Returns:
            A subclass of pd.api.extensions.ExtensionArray.
        """
        return ArrowTensorArray

    def __str__(self):
        return (
            f"<ArrowTensorType: shape={self.shape}, "
            f"dtype={self.storage_type.value_type}>"
        )
@PublicAPI(stability="beta")
class ArrowTensorArray(pa.ExtensionArray):
    """
    An array of fixed-shape, homogeneous-typed tensors.

    This is the Arrow side of TensorArray.

    See Arrow docs for customizing extension arrays:
    https://arrow.apache.org/docs/python/extending_types.html#custom-extension-array-class
    """

    # Dtype of offsets into the flattened values buffer; matches Arrow's
    # 32-bit list offsets.
    OFFSET_DTYPE = np.int32

    def __getitem__(self, key):
        # This __getitem__ hook allows us to support proper
        # indexing when accessing a single tensor (a "scalar" item of the
        # array). Without this hook for integer keys, the indexing will fail on
        # all currently released pyarrow versions due to a lack of proper
        # ExtensionScalar support. Support was added in
        # https://github.com/apache/arrow/pull/10904, but hasn't been released
        # at the time of this comment, and even with this support, the returned
        # ndarray is a flat representation of the n-dimensional tensor.

        # NOTE(Clark): We'd like to override the pa.Array.getitem() helper
        # instead, which would obviate the need for overriding __iter__()
        # below, but unfortunately overriding Cython cdef methods with normal
        # Python methods isn't allowed.
        if isinstance(key, slice):
            return super().__getitem__(key)
        return self._to_numpy(key)

    def __iter__(self):
        # Override pa.Array.__iter__() in order to return an iterator of
        # properly shaped tensors instead of an iterator of flattened tensors.
        # See comment in above __getitem__ method.
        for i in range(len(self)):
            # Use overridden __getitem__ method.
            yield self.__getitem__(i)

    def to_pylist(self):
        # Override pa.Array.to_pylist() due to a lack of ExtensionScalar
        # support (see comment in __getitem__).
        return list(self)

    @classmethod
    def from_numpy(cls, arr):
        """
        Convert an ndarray or an iterable of fixed-shape ndarrays to an array
        of fixed-shape, homogeneous-typed tensors.

        Args:
            arr: An ndarray or an iterable of fixed-shape ndarrays.

        Returns:
            An ArrowTensorArray containing len(arr) tensors of fixed shape.
        """
        if isinstance(arr, (list, tuple)):
            # NOTE(review): assumes a non-empty list/tuple; ``arr[0]`` raises
            # IndexError on ``[]`` — confirm callers never pass an empty list.
            if np.isscalar(arr[0]):
                return pa.array(arr)
            elif isinstance(arr[0], np.ndarray):
                # Stack ndarrays and pass through to ndarray handling logic
                # below.
                arr = np.stack(arr, axis=0)
        if isinstance(arr, np.ndarray):
            if not arr.flags.c_contiguous:
                # We only natively support C-contiguous ndarrays.
                arr = np.ascontiguousarray(arr)
            pa_dtype = pa.from_numpy_dtype(arr.dtype)
            outer_len = arr.shape[0]
            element_shape = arr.shape[1:]
            total_num_items = arr.size
            num_items_per_element = np.prod(element_shape) if element_shape else 1

            # Data buffer.
            if pa.types.is_boolean(pa_dtype):
                # NumPy doesn't represent boolean arrays as bit-packed, so we manually
                # bit-pack the booleans before handing the buffer off to Arrow.
                # NOTE: Arrow expects LSB bit-packed ordering.
                # NOTE: This creates a copy.
                arr = np.packbits(arr, bitorder="little")
            data_buffer = pa.py_buffer(arr)
            data_array = pa.Array.from_buffers(
                pa_dtype, total_num_items, [None, data_buffer]
            )

            # Offset buffer: offsets[i] is the start of element i in the flat
            # values array; one extra trailing offset marks the end.
            offset_buffer = pa.py_buffer(
                cls.OFFSET_DTYPE(
                    [i * num_items_per_element for i in range(outer_len + 1)]
                )
            )

            storage = pa.Array.from_buffers(
                pa.list_(pa_dtype),
                outer_len,
                [None, offset_buffer],
                children=[data_array],
            )
            type_ = ArrowTensorType(element_shape, pa_dtype)
            return pa.ExtensionArray.from_storage(type_, storage)
        elif isinstance(arr, Iterable):
            # Generic iterable: materialize and retry via the list path.
            return cls.from_numpy(list(arr))
        else:
            raise ValueError("Must give ndarray or iterable of ndarrays.")

    def _to_numpy(self, index: Optional[int] = None, zero_copy_only: bool = False):
        """
        Helper for getting either an element of the array of tensors as an
        ndarray, or the entire array of tensors as a single ndarray.

        Args:
            index: The index of the tensor element that we wish to return as
                an ndarray. If not given, the entire array of tensors is
                returned as an ndarray.
            zero_copy_only: If True, an exception will be raised if the
                conversion to a NumPy array would require copying the
                underlying data (e.g. in presence of nulls, or for
                non-primitive types). This argument is currently ignored, so
                zero-copy isn't enforced even if this argument is true.

        Returns:
            The corresponding tensor element as an ndarray if an index was
            given, or the entire array of tensors as an ndarray otherwise.
        """
        # TODO(Clark): Enforce zero_copy_only.
        # TODO(Clark): Support strides?
        # Buffers schema:
        # [None, offset_buffer, None, data_buffer]
        buffers = self.buffers()
        data_buffer = buffers[3]
        storage_list_type = self.storage.type
        value_type = storage_list_type.value_type
        ext_dtype = value_type.to_pandas_dtype()
        shape = self.type.shape
        if pa.types.is_boolean(value_type):
            # Arrow boolean array buffers are bit-packed, with 8 entries per byte,
            # and are accessed via bit offsets.
            buffer_item_width = value_type.bit_width
        else:
            # We assume all other array types are accessed via byte array
            # offsets.
            buffer_item_width = value_type.bit_width // 8
        # Number of items per inner ndarray.
        num_items_per_element = np.prod(shape) if shape else 1
        # Base offset into data buffer, e.g. due to zero-copy slice.
        buffer_offset = self.offset * num_items_per_element
        # Offset of array data in buffer.
        offset = buffer_item_width * buffer_offset
        if index is not None:
            # Getting a single tensor element of the array.
            offset_buffer = buffers[1]
            offset_array = np.ndarray(
                (len(self),), buffer=offset_buffer, dtype=self.OFFSET_DTYPE
            )
            # Offset into array to reach logical index.
            index_offset = offset_array[index]
            # Add the index offset to the base offset.
            offset += buffer_item_width * index_offset
        else:
            # Getting the entire array of tensors.
            shape = (len(self),) + shape
        if pa.types.is_boolean(value_type):
            # Special handling for boolean arrays, since Arrow bit-packs boolean arrays
            # while NumPy does not.
            # Cast as uint8 array and let NumPy unpack into a boolean view.
            # Offset into uint8 array, where each element is a bucket for 8 booleans.
            byte_bucket_offset = offset // 8
            # Offset for a specific boolean, within a uint8 array element.
            bool_offset = offset % 8
            # The number of uint8 array elements (buckets) that our slice spans.
            # Note that, due to the offset for a specific boolean, the slice can span
            # byte boundaries even if it contains less than 8 booleans.
            num_boolean_byte_buckets = 1 + ((bool_offset + np.prod(shape) - 1) // 8)
            # Construct the uint8 array view on the buffer.
            arr = np.ndarray(
                (num_boolean_byte_buckets,),
                dtype=np.uint8,
                buffer=data_buffer,
                offset=byte_bucket_offset,
            )
            # Unpack into a byte per boolean, using LSB bit-packed ordering.
            arr = np.unpackbits(arr, bitorder="little")
            # Interpret buffer as boolean array.
            return np.ndarray(shape, dtype=np.bool_, buffer=arr, offset=bool_offset)
        return np.ndarray(shape, dtype=ext_dtype, buffer=data_buffer, offset=offset)

    def to_numpy(self, zero_copy_only: bool = True):
        """
        Convert the entire array of tensors into a single ndarray.

        Args:
            zero_copy_only: If True, an exception will be raised if the
                conversion to a NumPy array would require copying the
                underlying data (e.g. in presence of nulls, or for
                non-primitive types). This argument is currently ignored, so
                zero-copy isn't enforced even if this argument is true.

        Returns:
            A single ndarray representing the entire array of tensors.
        """
        return self._to_numpy(zero_copy_only=zero_copy_only)
|
ray-project/ray
|
python/ray/data/extensions/tensor_extension.py
|
Python
|
apache-2.0
| 49,546
|
import glob
import os
import shutil
import time
from pyrannosaurus.clients.metadata import MetadataClient
from pyrannosaurus.utils import zip
from app import utils
def build(filename):
client = MetadataClient()
u, p, ip = utils.retrieve_credentials()
ip = True if ip == 'True' else False
client.login(u, p, is_production=ip)
if os.path.exists('.build'):
shutil.rmtree('.build/')
os.mkdir('.build/')
print "kc"
print filename
name, type = (filename.split("."))
utils.build_package(name, type, '.build/')
metadata_dir = utils.METADATA_DIRS.get(type, None)
os.mkdir('.build/' + metadata_dir)
src_dir = 'src/' + metadata_dir + '/' + filename
bld_dir = '.build/' + metadata_dir + '/'
if type in utils.CODE_FILES:
for file in glob.glob(src_dir + "*"):
shutil.copy(file, bld_dir)
else:
shutil.copyfile(src_dir, bld_dir)
zip('.build')
deploy_request = client.deploy('deploy.zip')
while True:
deploy_status = client.check_deploy_status(deploy_request.id)
if deploy_status.done:
break
else:
print deploy_status.status
time.sleep(3)
deploy_response = client.check_deploy_status(deploy_request.id)
print deploy_response
print "Deployment %s %s" % (deploy_response.id, deploy_response.status)
def deploy():
client = MetadataClient()
u, p, ip = utils.retrieve_credentials()
client.login(u, p, is_production=p)
zip('src/')
deploy_request = client.deploy('deploy.zip')
while True:
deploy_status = client.check_deploy_status(deploy_request.id)
if deploy_status.done:
break
else:
print deploy_status.status
time.sleep(3)
deploy_response = client.check_deploy_status(deploy_request.id)
print "Deployment %s %s" % (deploy_response.id, deploy_response.status)
|
kcshafer/slinky
|
app/deploy.py
|
Python
|
gpl-2.0
| 1,919
|
# -*- coding: utf-8 -*-
'''
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded, grain functions are detected and executed, the functions MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same value
as those returned here
'''
# Import python libs
import os
import socket
import sys
import re
import platform
import logging
import locale
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of platform.linux_distribution()
from platform import _supported_dists
_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
'slamd64', 'ovs', 'system', 'mint', 'oracle')
# Import salt libs
import salt.log
import salt.utils
import salt.utils.network
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
# Minimal command-execution functions, bound early so grains can shell out
# before the regular module loader has populated __salt__ (see the
# chicken-and-egg note above).
__salt__ = {
    'cmd.run': salt.modules.cmdmod._run_quiet,
    'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}
log = logging.getLogger(__name__)
# Whether the Python ``wmi`` module is importable; Windows-only grains
# degrade gracefully (with a logged error) when it is missing.
HAS_WMI = False
if salt.utils.is_windows():
    # attempt to import the python wmi module
    # the Windows minion uses WMI for some of its grains
    try:
        import wmi
        import salt.utils.winapi
        HAS_WMI = True
    except ImportError:
        log.exception(
            'Unable to import Python wmi module, some core grains '
            'will be missing'
        )
def _windows_cpudata():
    '''
    Return some CPU information on Windows minions

    Provides: num_cpus, cpu_model
    '''
    grains = {}
    raw_count = os.environ.get('NUMBER_OF_PROCESSORS')
    if raw_count is not None:
        # Cast to int so that the logic isn't broken when used as a
        # conditional in templating. Also follows _linux_cpudata()
        try:
            grains['num_cpus'] = int(raw_count)
        except ValueError:
            grains['num_cpus'] = 1
    grains['cpu_model'] = platform.processor()
    return grains
def _linux_cpudata():
    '''
    Return some CPU information for Linux minions

    Provides: num_cpus, cpu_model, cpu_flags
    '''
    grains = {}
    cpuinfo_path = '/proc/cpuinfo'
    if os.path.isfile(cpuinfo_path):
        with salt.utils.fopen(cpuinfo_path, 'r') as cpuinfo_file:
            for raw_line in cpuinfo_file:
                fields = raw_line.split(':')
                if len(fields) < 2:
                    continue
                label = fields[0].strip()
                data = fields[1].strip()
                if label == 'processor':
                    # Entries are zero-indexed; the last one seen wins.
                    grains['num_cpus'] = int(data) + 1
                elif label == 'model name':
                    grains['cpu_model'] = data
                elif label in ('flags', 'Features'):
                    # x86 kernels use 'flags'; ARM kernels use 'Features'.
                    grains['cpu_flags'] = data.split()
                elif label == 'Processor':
                    # ARM kernels report a single model line, e.g.
                    # "Processor : ARMv6-compatible processor rev 7 (v6l)".
                    grains['cpu_model'] = data.split('-')[0]
                    grains['num_cpus'] = 1
    grains.setdefault('num_cpus', 0)
    grains.setdefault('cpu_model', 'Unknown')
    grains.setdefault('cpu_flags', [])
    return grains
def _linux_gpu_data():
    '''
    Return GPU information gathered from ``lspci -vmm``.

    num_gpus: int
    gpus:
      - vendor: nvidia|amd|ati|...
        model: string
    '''
    lspci = salt.utils.which('lspci')
    if not lspci:
        log.info(
            'The `lspci` binary is not available on the system. GPU grains '
            'will not be available.'
        )
        return {}
    elif __opts__.get('enable_gpu_grains', None) is False:
        # Opt-out knob in the minion config
        log.info(
            'Skipping lspci call because enable_gpu_grains was set to False '
            'in the config. GPU grains will not be available.'
        )
        return {}
    # dominant gpu vendors to search for (MUST be lowercase for matching below)
    known_vendors = ['nvidia', 'amd', 'ati', 'intel']
    devs = []
    try:
        # -vmm prints one 'Key:\tValue' field per line, devices separated
        # by blank lines
        lspci_out = __salt__['cmd.run']('lspci -vmm')
        cur_dev = {}
        error = False
        # Add a blank element to the lspci_out.splitlines() list,
        # otherwise the last device is not evaluated as a cur_dev and ignored.
        lspci_list = lspci_out.splitlines()
        lspci_list.append('')
        for line in lspci_list:
            # check for record-separating empty lines
            if line == '':
                if cur_dev.get('Class', '') == 'VGA compatible controller':
                    devs.append(cur_dev)
                # XXX; may also need to search for "3D controller"
                cur_dev = {}
                continue
            if re.match(r'^\w+:\s+.*', line):
                key, val = line.split(':', 1)
                cur_dev[key.strip()] = val.strip()
            else:
                error = True
                log.debug('Unexpected lspci output: \'{0}\''.format(line))
        if error:
            log.warn(
                'Error loading grains, unexpected linux_gpu_data output, '
                'check that you have a valid shell configured and '
                'permissions to run lspci command'
            )
    except OSError:
        pass
    gpus = []
    for gpu in devs:
        # NOTE(review): assumes every kept record carries 'Vendor' and
        # 'Device' fields (lspci -vmm normally emits both) — a record
        # without them would raise KeyError here.
        vendor_strings = gpu['Vendor'].lower().split()
        # default vendor to 'unknown', overwrite if we match a known one
        vendor = 'unknown'
        for name in known_vendors:
            # search for an 'expected' vendor name in the list of strings
            if name in vendor_strings:
                vendor = name
                break
        gpus.append({'vendor': vendor, 'model': gpu['Device']})
    grains = {}
    grains['num_gpus'] = len(gpus)
    grains['gpus'] = gpus
    return grains
def _netbsd_gpu_data():
    '''
    Return GPU information parsed from ``pcictl pci0 list``.

    num_gpus: int
    gpus:
      - vendor: nvidia|amd|ati|...
        model: string
    '''
    # Vendors we know how to recognise in the pcictl output
    known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware']
    gpus = []
    try:
        pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
        for line in pcictl_out.splitlines():
            for vendor in known_vendors:
                # e.g. '000:00:2: Intel 82945GM ... (VGA display)'
                vga_re = r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor)
                found = re.match(vga_re, line, re.IGNORECASE)
                if found:
                    gpus.append(
                        {'vendor': found.group(1), 'model': found.group(2)}
                    )
    except OSError:
        pass
    return {'num_gpus': len(gpus), 'gpus': gpus}
def _osx_gpudata():
    '''
    Return GPU information parsed from ``system_profiler``.

    num_gpus: int
    gpus:
      - vendor: nvidia|amd|ati|...
        model: string
    '''
    gpus = []
    try:
        profiler_out = __salt__['cmd.run']('system_profiler SPDisplaysDataType')
        for line in profiler_out.splitlines():
            fieldname, _, fieldval = line.partition(': ')
            if fieldname.strip() != "Chipset Model":
                continue
            # e.g. 'Chipset Model: NVIDIA GeForce 320M'
            # -> vendor 'nvidia', model 'GeForce 320M'
            vendor, _, model = fieldval.partition(' ')
            gpus.append({'vendor': vendor.lower(), 'model': model})
    except OSError:
        pass
    return {'num_gpus': len(gpus), 'gpus': gpus}
def _bsd_cpudata(osdata):
    '''
    Return CPU information for BSD-like systems (and Darwin).

    Provides:
        cpuarch
        num_cpus
        cpu_model
        cpu_flags
    '''
    sysctl = salt.utils.which('sysctl')
    arch = salt.utils.which('arch')
    # Map grain name -> shell command used to obtain its value
    cmds = {}
    if sysctl:
        cmds.update({
            'num_cpus': '{0} -n hw.ncpu'.format(sysctl),
            'cpuarch': '{0} -n hw.machine'.format(sysctl),
            'cpu_model': '{0} -n hw.model'.format(sysctl),
        })
    if arch and osdata['kernel'] == 'OpenBSD':
        # OpenBSD's hw.machine is less specific; prefer `arch -s`
        cmds['cpuarch'] = '{0} -s'.format(arch)
    if osdata['kernel'] == 'Darwin':
        cmds['cpu_model'] = '{0} -n machdep.cpu.brand_string'.format(sysctl)
        cmds['cpu_flags'] = '{0} -n machdep.cpu.features'.format(sysctl)
    grains = dict([(k, __salt__['cmd.run'](v)) for k, v in cmds.items()])
    # basestring: this is Python 2 code; cmd.run returns a string here
    if 'cpu_flags' in grains and isinstance(grains['cpu_flags'], basestring):
        grains['cpu_flags'] = grains['cpu_flags'].split(' ')
    if osdata['kernel'] == 'NetBSD':
        # NetBSD exposes flags via `cpuctl identify`, e.g.
        #   cpu0: features 0xbfebfbff<FPU,VME,...>
        grains['cpu_flags'] = []
        for line in __salt__['cmd.run']('cpuctl identify 0').splitlines():
            m = re.match(r'cpu[0-9]:\ features[0-9]?\ .+<(.+)>', line)
            if m:
                flag = m.group(1).split(',')
                grains['cpu_flags'].extend(flag)
    if osdata['kernel'] == 'FreeBSD' and os.path.isfile('/var/run/dmesg.boot'):
        grains['cpu_flags'] = []
        # TODO: at least it needs to be tested for BSD other then FreeBSD
        with salt.utils.fopen('/var/run/dmesg.boot', 'r') as _fp:
            cpu_here = False
            for line in _fp:
                if line.startswith('CPU: '):
                    cpu_here = True  # starts CPU descr
                    continue
                if cpu_here:
                    if not line.startswith(' '):
                        break  # game over
                    if 'Features' in line:
                        start = line.find('<')
                        end = line.find('>')
                        if start > 0 and end > 0:
                            flag = line[start + 1:end].split(',')
                            grains['cpu_flags'].extend(flag)
    # NOTE(review): if sysctl was not found, 'num_cpus' is absent and this
    # raises KeyError (only ValueError is caught) — confirm intended.
    try:
        grains['num_cpus'] = int(grains['num_cpus'])
    except ValueError:
        grains['num_cpus'] = 1
    return grains
def _sunos_cpudata():
    '''
    Return the CPU information for Solaris-like systems.

    Provides:
        cpuarch
        num_cpus
        cpu_model
        cpu_flags
    '''
    grains = {'cpu_flags': []}
    grains['cpuarch'] = __salt__['cmd.run']('uname -p')
    # psrinfo prints one line per (virtual) processor
    psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
    grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo).splitlines())
    # kstat line looks like: 'cpu_info:0:cpu_info0:brand   <model>'
    kstat_info = 'kstat -p cpu_info:0:*:brand'
    for line in __salt__['cmd.run'](kstat_info).splitlines():
        brand_match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
        if brand_match:
            grains['cpu_model'] = brand_match.group(2)
    # isainfo lists supported instruction-set extensions, indented
    isainfo = 'isainfo -n -v'
    for line in __salt__['cmd.run'](isainfo).splitlines():
        flags_match = re.match(r'^\s+(.+)', line)
        if flags_match:
            grains['cpu_flags'].extend(flags_match.group(1).split())
    return grains
def _memdata(osdata):
    '''
    Gather information about the system memory.

    Provides:
        mem_total -- total physical memory in megabytes (0 if unknown)
    '''
    grains = {'mem_total': 0}
    if osdata['kernel'] == 'Linux':
        meminfo = '/proc/meminfo'
        if os.path.isfile(meminfo):
            with salt.utils.fopen(meminfo, 'r') as ifile:
                for line in ifile:
                    comps = line.rstrip('\n').split(':')
                    if not len(comps) > 1:
                        continue
                    if comps[0].strip() == 'MemTotal':
                        # MemTotal is reported in kB; convert to MB
                        grains['mem_total'] = int(comps[1].split()[0]) / 1024
    elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD', 'Darwin'):
        sysctl = salt.utils.which('sysctl')
        if sysctl:
            if osdata['kernel'] == 'Darwin':
                mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
            else:
                mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
            if (osdata['kernel'] == 'NetBSD' and mem.startswith('-')):
                # 32-bit hw.physmem overflowed; use the 64-bit counter
                mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
            grains['mem_total'] = int(mem) / 1024 / 1024
    elif osdata['kernel'] == 'SunOS':
        prtconf = '/usr/sbin/prtconf 2>/dev/null'
        for line in __salt__['cmd.run'](prtconf).splitlines():
            comps = line.split(' ')
            if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
                # prtconf already reports 'Memory size: <n> Megabytes'
                grains['mem_total'] = int(comps[2].strip())
    elif osdata['kernel'] == 'Windows' and HAS_WMI:
        with salt.utils.winapi.Com():
            wmi_c = wmi.WMI()
            # this is a list of each stick of ram in a system
            # WMI returns it as the string value of the number of bytes
            tot_bytes = sum(map(lambda x: int(x.Capacity),
                                wmi_c.Win32_PhysicalMemory()), 0)
            # bytes / 1024**2 -> megabytes, consistent with the other
            # platforms above (the old comment claiming gigabytes was wrong)
            grains['mem_total'] = int(tot_bytes / (1024 ** 2))
    return grains
def _virtual(osdata):
    '''
    Returns what type of virtual hardware is under the hood, kvm or physical.

    Provides:
        virtual          -- 'physical' unless a hypervisor is detected
        virtual_subtype  -- optional refinement (e.g. 'Xen Dom0', 'jail')
    '''
    # This is going to be a monster, if you are running a vm you can test this
    # grain with please submit patches!
    grains = {'virtual': 'physical'}
    # First pass: probe generic tools in order of decreasing reliability
    for command in ('dmidecode', 'lspci', 'dmesg'):
        args = []
        if osdata['kernel'] == 'Darwin':
            # On OS X only system_profiler is consulted
            command = 'system_profiler'
            args = ['SPDisplaysDataType']
        cmd = salt.utils.which(command)
        if not cmd:
            continue
        cmd = '%s %s' % (command, ' '.join(args))
        ret = __salt__['cmd.run_all'](cmd)
        if ret['retcode'] > 0:
            if salt.log.is_logging_configured():
                if salt.utils.is_windows():
                    continue
                log.warn(
                    'Although \'{0}\' was found in path, the current user '
                    'cannot execute it. Grains output might not be '
                    'accurate.'.format(command)
                )
            continue
        output = ret['stdout']
        if command == "system_profiler":
            macoutput = output.lower()
            # 0x1ab8 / 0x15ad are the Parallels / VMware PCI vendor IDs
            if '0x1ab8' in macoutput:
                grains['virtual'] = 'Parallels'
            if 'parallels' in macoutput:
                grains['virtual'] = 'Parallels'
            if 'vmware' in macoutput:
                grains['virtual'] = 'VMware'
            if '0x15ad' in macoutput:
                grains['virtual'] = 'VMware'
            if 'virtualbox' in macoutput:
                grains['virtual'] = 'VirtualBox'
            # Break out of the loop so the next log message is not issued
            break
        elif command == 'dmidecode' or command == 'dmesg':
            # Product Name: VirtualBox
            if 'Vendor: QEMU' in output:
                # FIXME: Make this detect between kvm or qemu
                grains['virtual'] = 'kvm'
            if 'Vendor: Bochs' in output:
                grains['virtual'] = 'kvm'
            # Product Name: (oVirt) www.ovirt.org
            # Red Hat Community virtualization Project based on kvm
            elif 'Manufacturer: oVirt' in output:
                grains['virtual'] = 'kvm'
            elif 'VirtualBox' in output:
                grains['virtual'] = 'VirtualBox'
            # Product Name: VMware Virtual Platform
            elif 'VMware' in output:
                grains['virtual'] = 'VMware'
            # Manufacturer: Microsoft Corporation
            # Product Name: Virtual Machine
            elif ': Microsoft' in output and 'Virtual Machine' in output:
                grains['virtual'] = 'VirtualPC'
            # Manufacturer: Parallels Software International Inc.
            elif 'Parallels Software' in output:
                grains['virtual'] = 'Parallels'
            # Break out of the loop, lspci parsing is not necessary
            break
        elif command == 'lspci':
            # dmidecode not available or the user does not have the necessary
            # permissions
            model = output.lower()
            if 'vmware' in model:
                grains['virtual'] = 'VMware'
            # 00:04.0 System peripheral: InnoTek Systemberatung GmbH VirtualBox Guest Service
            elif 'virtualbox' in model:
                grains['virtual'] = 'VirtualBox'
            elif 'qemu' in model:
                grains['virtual'] = 'kvm'
            elif 'virtio' in model:
                grains['virtual'] = 'kvm'
            # Break out of the loop so the next log message is not issued
            break
    else:
        # for/else: only reached when no probe above ran successfully
        log.warn(
            'The tools \'dmidecode\', \'lspci\' and \'dmesg\' failed to execute '
            'because they do not exist on the system of the user running '
            'this instance or the user does not have the necessary permissions '
            'to execute them. Grains output might not be accurate.'
        )
    # Second pass: kernel-specific filesystem / sysctl heuristics
    choices = ('Linux', 'OpenBSD', 'HP-UX')
    isdir = os.path.isdir
    sysctl = salt.utils.which('sysctl')
    if osdata['kernel'] in choices:
        if isdir('/proc/vz'):
            if os.path.isfile('/proc/vz/version'):
                grains['virtual'] = 'openvzhn'
            else:
                grains['virtual'] = 'openvzve'
        elif isdir('/proc/sys/xen') or isdir('/sys/bus/xen') or isdir('/proc/xen'):
            if os.path.isfile('/proc/xen/xsd_kva'):
                # Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
                # Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
                grains['virtual_subtype'] = 'Xen Dom0'
            else:
                if grains.get('productname', '') == 'HVM domU':
                    # Requires dmidecode!
                    grains['virtual_subtype'] = 'Xen HVM DomU'
                elif os.path.isfile('/proc/xen/capabilities') and os.access('/proc/xen/capabilities', os.R_OK):
                    caps = salt.utils.fopen('/proc/xen/capabilities')
                    if 'control_d' not in caps.read():
                        # Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
                        grains['virtual_subtype'] = 'Xen PV DomU'
                    else:
                        # Shouldn't get to this, but just in case
                        grains['virtual_subtype'] = 'Xen Dom0'
                    caps.close()
                # Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
                # Tested on Fedora 15 / 2.6.41.4-1 without running xen
                elif isdir('/sys/bus/xen'):
                    if 'xen:' in __salt__['cmd.run']('dmesg').lower():
                        grains['virtual_subtype'] = 'Xen PV DomU'
                    elif os.listdir('/sys/bus/xen/drivers'):
                        # An actual DomU will have several drivers
                        # whereas a paravirt ops kernel will not.
                        grains['virtual_subtype'] = 'Xen PV DomU'
            # If a Dom0 or DomU was detected, obviously this is xen
            if 'dom' in grains.get('virtual_subtype', '').lower():
                grains['virtual'] = 'xen'
        if os.path.isfile('/proc/cpuinfo'):
            # NOTE(review): this fopen handle is never explicitly closed
            if 'QEMU Virtual CPU' in salt.utils.fopen('/proc/cpuinfo', 'r').read():
                grains['virtual'] = 'kvm'
    elif osdata['kernel'] == 'FreeBSD':
        kenv = salt.utils.which('kenv')
        if kenv:
            product = __salt__['cmd.run']('{0} smbios.system.product'.format(kenv))
            maker = __salt__['cmd.run']('{0} smbios.system.maker'.format(kenv))
            if product.startswith('VMware'):
                grains['virtual'] = 'VMware'
            if maker.startswith('Xen'):
                grains['virtual_subtype'] = '{0} {1}'.format(maker, product)
                grains['virtual'] = 'xen'
        if sysctl:
            model = __salt__['cmd.run']('{0} hw.model'.format(sysctl))
            jail = __salt__['cmd.run']('{0} -n security.jail.jailed'.format(sysctl))
            if jail == '1':
                grains['virtual_subtype'] = 'jail'
            if 'QEMU Virtual CPU' in model:
                grains['virtual'] = 'kvm'
    elif osdata['kernel'] == 'SunOS':
        # Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
        zonename = salt.utils.which('zonename')
        if zonename:
            zone = __salt__['cmd.run']('{0}'.format(zonename))
            if zone != 'global':
                grains['virtual'] = 'zone'
                if osdata['os'] == 'SmartOS':
                    grains.update(_smartos_zone_data())
        # Check if it's a branded zone (i.e. Solaris 8/9 zone)
        if isdir('/.SUNWnative'):
            grains['virtual'] = 'zone'
    elif osdata['kernel'] == 'NetBSD':
        if sysctl:
            if 'QEMU Virtual CPU' in __salt__['cmd.run'](
                    '{0} -n machdep.cpu_brand'.format(sysctl)):
                grains['virtual'] = 'kvm'
            elif not 'invalid' in __salt__['cmd.run'](
                    '{0} -n machdep.xen.suspend'.format(sysctl)):
                grains['virtual'] = 'Xen PV DomU'
            elif 'VMware' in __salt__['cmd.run'](
                    '{0} -n machdep.dmi.system-vendor'.format(sysctl)):
                grains['virtual'] = 'VMware'
            # NetBSD has Xen dom0 support
            elif __salt__['cmd.run'](
                    '{0} -n machdep.idle-mechanism'.format(sysctl)) == 'xen':
                if os.path.isfile('/var/run/xenconsoled.pid'):
                    grains['virtual_subtype'] = 'Xen Dom0'
    return grains
def _ps(osdata):
'''
Return the ps grain
'''
grains = {}
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
if osdata['os'] in bsd_choices:
grains['ps'] = 'ps auxwww'
elif osdata['os_family'] == 'Solaris':
grains['ps'] = '/usr/ucb/ps auxwww'
elif osdata['os'] == 'Windows':
grains['ps'] = 'tasklist.exe'
elif osdata.get('virtual', '') == 'openvzhn':
grains['ps'] = 'ps -fH -p $(grep -l \"^envID:[[:space:]]*0\\$\" /proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") | awk \'{ $7=\"\"; print }\''
elif osdata['os_family'] == 'Debian':
grains['ps'] = 'ps -efHww'
else:
grains['ps'] = 'ps -efH'
return grains
def _windows_platform_data():
    '''
    Use the platform module for as much as we can.

    Provides:
        osmanufacturer
        manufacturer
        productname
        biosversion
        osfullname
        timezone
        windowsdomain
        virtual / virtual_subtype (when a hypervisor signature is found)
    '''
    # All data below comes from WMI; bail out if the wmi module is missing
    if not HAS_WMI:
        return {}
    with salt.utils.winapi.Com():
        wmi_c = wmi.WMI()
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
        systeminfo = wmi_c.Win32_ComputerSystem()[0]
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa394239%28v=vs.85%29.aspx
        osinfo = wmi_c.Win32_OperatingSystem()[0]
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
        biosinfo = wmi_c.Win32_BIOS()[0]
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
        timeinfo = wmi_c.Win32_TimeZone()[0]
        # the name of the OS comes with a bunch of other data about the install
        # location. For example:
        # 'Microsoft Windows Server 2008 R2 Standard |C:\\Windows|\\Device\\Harddisk0\\Partition2'
        (osfullname, _) = osinfo.Name.split('|', 1)
        osfullname = osfullname.strip()
        grains = {
            'osmanufacturer': osinfo.Manufacturer,
            'manufacturer': systeminfo.Manufacturer,
            'productname': systeminfo.Model,
            # bios name had a bunch of whitespace appended to it in my testing
            # 'PhoenixBIOS 4.0 Release 6.0     '
            'biosversion': biosinfo.Name.strip(),
            'osfullname': osfullname,
            'timezone': timeinfo.Description,
            'windowsdomain': systeminfo.Domain,
        }
        # test for virtualized environments
        # I only had VMware available so the rest are unvalidated
        if 'VRTUAL' in biosinfo.Version:  # (not a typo)
            grains['virtual'] = 'HyperV'
        elif 'A M I' in biosinfo.Version:
            grains['virtual'] = 'VirtualPC'
        elif 'VMware' in systeminfo.Model:
            grains['virtual'] = 'VMware'
        elif 'VirtualBox' in systeminfo.Model:
            grains['virtual'] = 'VirtualBox'
        elif 'Xen' in biosinfo.Version:
            grains['virtual'] = 'Xen'
            if 'HVM domU' in systeminfo.Model:
                grains['virtual_subtype'] = 'HVM domU'
    return grains
def id_():
    '''
    Return the minion id from the configuration.
    '''
    minion_id = __opts__.get('id', '')
    return {'id': minion_id}
_REPLACE_LINUX_RE = re.compile(r'linux', re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
_OS_NAME_MAP = {
'redhatente': 'RedHat',
'gentoobase': 'Gentoo',
'archarm': 'Arch ARM',
'arch': 'Arch',
'debian': 'Debian',
'debiangnu/': 'Debian',
'raspbiangn': 'Raspbian',
'fedoraremi': 'Fedora',
'amazonami': 'Amazon',
'alt': 'ALT',
'oracleserv': 'OEL',
'cloudserve': 'CloudLinux',
'pidora': 'Fedora',
'scientific': 'ScientificLinux'
}
# Map the 'os' grain to the 'os_family' grain
# These should always be capitalized entries as the lookup comes
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
_OS_FAMILY_MAP = {
'Ubuntu': 'Debian',
'Fedora': 'RedHat',
'CentOS': 'RedHat',
'GoOSe': 'RedHat',
'Scientific': 'RedHat',
'Amazon': 'RedHat',
'CloudLinux': 'RedHat',
'OVS': 'RedHat',
'OEL': 'RedHat',
'XCP': 'RedHat',
'XenServer': 'RedHat',
'Mandrake': 'Mandriva',
'ESXi': 'VMWare',
'Mint': 'Debian',
'VMWareESX': 'VMWare',
'Bluewhite64': 'Bluewhite',
'Slamd64': 'Slackware',
'SLES': 'Suse',
'SUSE Enterprise Server': 'Suse',
'SUSE Enterprise Server': 'Suse',
'SLED': 'Suse',
'openSUSE': 'Suse',
'SUSE': 'Suse',
'Solaris': 'Solaris',
'SmartOS': 'Solaris',
'OpenIndiana Development': 'Solaris',
'Arch ARM': 'Arch',
'ALT': 'RedHat',
'Trisquel': 'Debian',
'GCEL': 'Debian',
'Linaro': 'Debian',
'elementary OS': 'Debian',
'ScientificLinux': 'RedHat',
'Raspbian': 'Debian'
}
def os_data():
    '''
    Return grains pertaining to the operating system.

    Builds the core platform grains (kernel, os, os_family, osrelease,
    oscodename, osarch, cpu/gpu/memory data, virtualization info, ps).
    '''
    grains = {
        'num_gpus': 0,
        'gpus': [],
    }
    # Windows Server 2008 64-bit
    # ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64', 'Intel64 Fam ily 6 Model 23 Stepping 6, GenuineIntel')
    # Ubuntu 10.04
    # ('Linux', 'MINIONNAME', '2.6.32-38-server', '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
    (grains['kernel'], grains['nodename'],
     grains['kernelrelease'], version, grains['cpuarch'], _) = platform.uname()
    if salt.utils.is_windows():
        grains['osrelease'] = grains['kernelrelease']
        grains['osversion'] = grains['kernelrelease'] = version
        grains['os'] = 'Windows'
        grains['os_family'] = 'Windows'
        grains.update(_memdata(grains))
        grains.update(_windows_platform_data())
        grains.update(_windows_cpudata())
        grains.update(_ps(grains))
        # Windows gathering is complete; skip the POSIX logic below
        return grains
    elif salt.utils.is_linux():
        # Add lsb grains on any distro with lsb-release
        try:
            import lsb_release
            release = lsb_release.get_distro_information()
            for key, value in release.iteritems():
                key = key.lower()
                lsb_param = 'lsb_{0}{1}'.format(
                    '' if key.startswith('distrib_') else 'distrib_',
                    key
                )
                grains[lsb_param] = value
        except ImportError:
            # if the python library isn't available, default to regex
            if os.path.isfile('/etc/lsb-release'):
                with salt.utils.fopen('/etc/lsb-release') as ifile:
                    for line in ifile:
                        # Matches any possible format:
                        #     DISTRIB_ID="Ubuntu"
                        #     DISTRIB_ID='Mageia'
                        #     DISTRIB_ID=Fedora
                        #     DISTRIB_RELEASE='10.10'
                        #     DISTRIB_CODENAME='squeeze'
                        #     DISTRIB_DESCRIPTION='Ubuntu 10.10'
                        regex = re.compile('^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?([\\w\\s\\.-_]+)(?:\'|")?')
                        match = regex.match(line.rstrip('\n'))
                        if match:
                            # Adds: lsb_distrib_{id,release,codename,description}
                            grains['lsb_{0}'.format(match.groups()[0].lower())] = match.groups()[1].rstrip()
            elif os.path.isfile('/etc/os-release'):
                # Arch ARM Linux
                with salt.utils.fopen('/etc/os-release') as ifile:
                    # Imitate lsb-release
                    for line in ifile:
                        # NAME="Arch Linux ARM"
                        # ID=archarm
                        # ID_LIKE=arch
                        # PRETTY_NAME="Arch Linux ARM"
                        # ANSI_COLOR="0;36"
                        # HOME_URL="http://archlinuxarm.org/"
                        # SUPPORT_URL="https://archlinuxarm.org/forum"
                        # BUG_REPORT_URL="https://github.com/archlinuxarm/PKGBUILDs/issues"
                        regex = re.compile('^([\\w]+)=(?:\'|")?([\\w\\s\\.-_]+)(?:\'|")?')
                        match = regex.match(line.rstrip('\n'))
                        if match:
                            name, value = match.groups()
                            if name.lower() == 'name':
                                grains['lsb_distrib_id'] = value.strip()
            elif os.path.isfile('/etc/altlinux-release'):
                # ALT Linux
                grains['lsb_distrib_id'] = 'altlinux'
                with salt.utils.fopen('/etc/altlinux-release') as ifile:
                    # This file is symlinked to from:
                    #     /etc/fedora-release
                    #     /etc/redhat-release
                    #     /etc/system-release
                    for line in ifile:
                        # ALT Linux Sisyphus (unstable)
                        comps = line.split()
                        if comps[0] == 'ALT':
                            grains['lsb_distrib_release'] = comps[2]
                            grains['lsb_distrib_codename'] = \
                                comps[3].replace('(', '').replace(')', '')
            elif os.path.isfile('/etc/centos-release'):
                # CentOS Linux
                grains['lsb_distrib_id'] = 'CentOS'
                with salt.utils.fopen('/etc/centos-release') as ifile:
                    for line in ifile:
                        # Need to pull out the version and codename
                        # in the case of custom content in /etc/centos-release
                        find_release = re.compile(r'\d+\.\d+')
                        find_codename = re.compile(r'(?<=\()(.*?)(?=\))')
                        release = find_release.search(line)
                        codename = find_codename.search(line)
                        if release is not None:
                            grains['lsb_distrib_release'] = release.group()
                        if codename is not None:
                            grains['lsb_distrib_codename'] = codename.group()
        # Use the already intelligent platform module to get distro info
        # (though apparently it's not intelligent enough to strip quotes)
        (osname, osrelease, oscodename) = \
            [x.strip('"').strip("'") for x in
                platform.linux_distribution(supported_dists=_supported_dists)]
        # Try to assign these three names based on the lsb info, they tend to
        # be more accurate than what python gets from /etc/DISTRO-release.
        # It's worth noting that Ubuntu has patched their Python distribution
        # so that platform.linux_distribution() does the /etc/lsb-release
        # parsing, but we do it anyway here for the sake for full portability.
        grains['osfullname'] = grains.get('lsb_distrib_id', osname).strip()
        grains['osrelease'] = grains.get('lsb_distrib_release',
                                         osrelease).strip()
        grains['oscodename'] = grains.get('lsb_distrib_codename',
                                          oscodename).strip()
        distroname = _REPLACE_LINUX_RE.sub('', grains['osfullname']).strip()
        # return the first ten characters with no spaces, lowercased
        shortname = distroname.replace(' ', '').lower()[:10]
        # this maps the long names from the /etc/DISTRO-release files to the
        # traditional short names that Salt has used.
        grains['os'] = _OS_NAME_MAP.get(shortname, distroname)
        grains.update(_linux_cpudata())
        grains.update(_linux_gpu_data())
    elif grains['kernel'] == 'SunOS':
        grains['os_family'] = 'Solaris'
        uname_v = __salt__['cmd.run']('uname -v')
        if 'joyent_' in uname_v:
            # See https://github.com/joyent/smartos-live/issues/224
            grains['os'] = grains['osfullname'] = 'SmartOS'
            grains['osrelease'] = uname_v
        elif os.path.isfile('/etc/release'):
            with salt.utils.fopen('/etc/release', 'r') as fp_:
                rel_data = fp_.read()
                try:
                    release_re = r'(Solaris|OpenIndiana(?: Development)?)' \
                                 r'\s+(\d+ \d+\/\d+|oi_\S+)?'
                    osname, osrelease = re.search(release_re,
                                                  rel_data).groups()
                except AttributeError:
                    # Set a blank osrelease grain and fallback to 'Solaris'
                    # as the 'os' grain.
                    grains['os'] = grains['osfullname'] = 'Solaris'
                    grains['osrelease'] = ''
                else:
                    grains['os'] = grains['osfullname'] = osname
                    grains['osrelease'] = osrelease
        grains.update(_sunos_cpudata())
    elif grains['kernel'] == 'VMkernel':
        grains['os'] = 'ESXi'
    elif grains['kernel'] == 'Darwin':
        osrelease = __salt__['cmd.run']('sw_vers -productVersion')
        grains['os'] = 'MacOS'
        grains['osrelease'] = osrelease
        grains.update(_bsd_cpudata(grains))
        grains.update(_osx_gpudata())
    else:
        grains['os'] = grains['kernel']
    if grains['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
        grains.update(_bsd_cpudata(grains))
        grains['osrelease'] = grains['kernelrelease'].split('-')[0]
        if grains['kernel'] == 'NetBSD':
            grains.update(_netbsd_gpu_data())
    if not grains['os']:
        grains['os'] = 'Unknown {0}'.format(grains['kernel'])
        grains['os_family'] = 'Unknown'
    else:
        # this assigns family names based on the os name
        # family defaults to the os name if not found
        grains['os_family'] = _OS_FAMILY_MAP.get(grains['os'],
                                                 grains['os'])
    # Build the osarch grain. This grain will be used for platform-specific
    # considerations such as package management. Fall back to the CPU
    # architecture.
    if grains.get('os_family') == 'Debian':
        osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
    else:
        osarch = grains['cpuarch']
    grains['osarch'] = osarch
    grains.update(_memdata(grains))
    # Get the hardware and bios data
    grains.update(_hw_data(grains))
    # Load the virtual machine info
    grains.update(_virtual(grains))
    grains.update(_ps(grains))
    # Load additional OS family grains
    if grains['os_family'] == "RedHat":
        # NOTE(review): str.split() returns a list, so 'osmajorrelease' is
        # e.g. ['6', '4'] rather than '6' — if a scalar major version is
        # intended this should be .split('.', 1)[0]; confirm against
        # consumers before changing.
        grains['osmajorrelease'] = grains['osrelease'].split('.', 1)
        grains['osfinger'] = '{os}-{ver}'.format(
            os=grains['osfullname'],
            ver=grains['osrelease'].partition('.')[0])
    elif grains.get('osfullname') == 'Ubuntu':
        grains['osfinger'] = '{os}-{ver}'.format(
            os=grains['osfullname'],
            ver=grains['osrelease'])
    return grains
def locale_info():
    '''
    Provides
        defaultlanguage
        defaultencoding
    '''
    grains = {}
    try:
        language, encoding = locale.getdefaultlocale()
        grains['defaultlanguage'] = language
        grains['defaultencoding'] = encoding
    except Exception:
        # locale.getdefaultlocale can ValueError!! Catch anything else it
        # might do, per #2205
        grains['defaultlanguage'] = 'unknown'
        grains['defaultencoding'] = 'unknown'
    return grains
def hostname():
    '''
    Return fqdn, hostname, domainname

    Provides:
        fqdn
        host
        localhost
        domain
    '''
    grains = {}
    grains['localhost'] = socket.gethostname()
    # socket.getfqdn() may trigger a DNS lookup; call it once instead of
    # twice as the previous implementation did
    fqdn = socket.getfqdn()
    if '.' in fqdn:
        grains['fqdn'] = fqdn
    else:
        # No dot means no usable domain part; fall back to the bare hostname
        grains['fqdn'] = grains['localhost']
    # partition('.')[::2] -> (part before the first dot, part after it)
    (grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2]
    return grains
def append_domain():
    '''
    Return append_domain if set
    '''
    # Emit the grain only when the option exists in the minion config
    try:
        return {'append_domain': __opts__['append_domain']}
    except KeyError:
        return {}
def ip4():
    '''
    Return a list of ipv4 addrs
    '''
    addrs = salt.utils.network.ip_addrs(include_loopback=True)
    return {'ipv4': addrs}
def fqdn_ip4():
    '''
    Return a list of ipv4 addrs of fqdn
    '''
    try:
        info = socket.getaddrinfo(hostname()['fqdn'], None, socket.AF_INET)
    except socket.error:
        return {'fqdn_ip4': []}
    # getaddrinfo may repeat an address per socket type; deduplicate
    addrs = list({item[4][0] for item in info})
    return {'fqdn_ip4': addrs}
def ip6():
    '''
    Return a list of ipv6 addrs
    '''
    addrs = salt.utils.network.ip_addrs6(include_loopback=True)
    return {'ipv6': addrs}
def fqdn_ip6():
    '''
    Return a list of ipv6 addrs of fqdn
    '''
    try:
        info = socket.getaddrinfo(hostname()['fqdn'], None, socket.AF_INET6)
    except socket.error:
        return {'fqdn_ip6': []}
    # getaddrinfo may repeat an address per socket type; deduplicate
    addrs = list({item[4][0] for item in info})
    return {'fqdn_ip6': addrs}
def ip_interfaces():
    '''
    Provide a dict of the connected interfaces and their ip addresses.

    Provides:
        ip_interfaces
    '''
    ret = {}
    for face, info in salt.utils.network.interfaces().items():
        # Primary addresses first, then any secondary/alias addresses
        iface_ips = [
            inet['address']
            for inet in info.get('inet', [])
            if 'address' in inet
        ]
        iface_ips.extend(
            secondary['address']
            for secondary in info.get('secondary', [])
            if 'address' in secondary
        )
        ret[face] = iface_ips
    return {'ip_interfaces': ret}
def path():
    '''
    Return the path

    Provides:
        path
    '''
    path_env = os.environ['PATH'].strip()
    return {'path': path_env}
def pythonversion():
    '''
    Return the Python version

    Provides:
        pythonversion
    '''
    major, minor, micro, releaselevel, serial = sys.version_info
    return {'pythonversion': [major, minor, micro, releaselevel, serial]}
def pythonpath():
    '''
    Return the Python path

    Provides:
        pythonpath
    '''
    # Note: the live sys.path list is returned, not a copy
    module_search_path = sys.path
    return {'pythonpath': module_search_path}
def saltpath():
    '''
    Return the path of the salt module

    Provides:
        saltpath
    '''
    # __file__ -> .../salt/grains/core.py; one pardir gives the grains
    # directory, dirname() of that gives the salt package directory
    grains_dir = os.path.abspath(os.path.join(__file__, os.path.pardir))
    return {'saltpath': os.path.dirname(grains_dir)}
def saltversion():
    '''
    Return the version of salt

    Provides:
        saltversion
    '''
    # Imported lazily to avoid a circular import at module load time
    import salt.version
    return {'saltversion': salt.version.__version__}
def saltversioninfo():
    '''
    Return the version_info of salt

    .. versionadded:: 0.17.0

    Provides:
        saltversioninfo
    '''
    # Imported lazily to avoid a circular import at module load time
    import salt.version
    return {'saltversioninfo': salt.version.__version_info__}
# Relatively complex mini-algorithm to iterate over the various
# sections of dmidecode output and return matches for specific
# lines containing data we want, but only in the right section.
def _dmidecode_data(regex_dict):
    '''
    Parse the output of dmidecode in a generic fashion that can
    be used for the multiple system types which have dmidecode.

    :param regex_dict: mapping of section-header regex ->
        {item regex: grain name} to extract inside that section.
    :return: dict of grain name -> first matching value.
    '''
    ret = {}
    # No use running if dmidecode/smbios isn't in the path
    if salt.utils.which('dmidecode'):
        out = __salt__['cmd.run']('dmidecode')
    elif salt.utils.which('smbios'):
        out = __salt__['cmd.run']('smbios')
    else:
        return ret
    for section in regex_dict:
        section_found = False
        # Look at every line for the right section
        for line in out.splitlines():
            if not line:
                continue
            # We've found it, woohoo!
            if re.match(section, line):
                section_found = True
                continue
            if not section_found:
                continue
            # NOTE(review): section_found is never reset when a later
            # section header begins, so the item regexes below also run
            # against lines from subsequent sections; the `grain in ret`
            # guard makes the first match win — confirm this is intended.
            # Now that a section has been found, find the data
            for item in regex_dict[section]:
                # Examples:
                #    Product Name: 64639SU
                #    Version: 7LETC1WW (2.21 )
                regex = re.compile(r'\s+{0}\s+(.*)$'.format(item))
                grain = regex_dict[section][item]
                # Skip to the next iteration if this grain
                # has been found in the dmidecode output.
                if grain in ret:
                    continue
                match = regex.match(line)
                # Finally, add the matched data to the grains returned
                if match:
                    ret[grain] = match.group(1).strip()
    return ret
def _hw_data(osdata):
    '''
    Get system specific hardware data from dmidecode

    Provides
        biosversion
        productname
        manufacturer
        serialnumber
        biosreleasedate

    .. versionadded:: 0.9.5
    '''
    grains = {}
    # TODO: *BSD dmidecode output
    if osdata['kernel'] == 'Linux':
        # Section regex -> {field regex: grain name}; see _dmidecode_data()
        linux_dmi_regex = {
            'BIOS [Ii]nformation': {
                '[Vv]ersion:': 'biosversion',
                '[Rr]elease [Dd]ate:': 'biosreleasedate',
            },
            '[Ss]ystem [Ii]nformation': {
                'Manufacturer:': 'manufacturer',
                'Product(?: Name)?:': 'productname',
                'Serial Number:': 'serialnumber',
            },
        }
        grains.update(_dmidecode_data(linux_dmi_regex))
    elif osdata['kernel'] == 'SunOS':
        # Solaris smbios prints SMB_TYPE_* section markers
        sunos_dmi_regex = {
            r'(.+)SMB_TYPE_BIOS\s\(BIOS [Ii]nformation\)': {
                '[Vv]ersion [Ss]tring:': 'biosversion',
                '[Rr]elease [Dd]ate:': 'biosreleasedate',
            },
            r'(.+)SMB_TYPE_SYSTEM\s\([Ss]ystem [Ii]nformation\)': {
                'Manufacturer:': 'manufacturer',
                'Product(?: Name)?:': 'productname',
                'Serial Number:': 'serialnumber',
            },
        }
        grains.update(_dmidecode_data(sunos_dmi_regex))
    # On FreeBSD /bin/kenv (already in base system) can be used instead of dmidecode
    elif osdata['kernel'] == 'FreeBSD':
        kenv = salt.utils.which('kenv')
        if kenv:
            # In theory, it will be easier to add new fields to this later
            fbsd_hwdata = {
                'biosversion': 'smbios.bios.version',
                'manufacturer': 'smbios.system.maker',
                'serialnumber': 'smbios.system.serial',
                'productname': 'smbios.system.product',
                'biosreleasedate': 'smbios.bios.reldate',
            }
            for key, val in fbsd_hwdata.items():
                grains[key] = __salt__['cmd.run']('{0} {1}'.format(kenv, val))
    elif osdata['kernel'] == 'OpenBSD':
        sysctl = salt.utils.which('sysctl')
        hwdata = {'biosversion': 'hw.version',
                  'manufacturer': 'hw.vendor',
                  'productname': 'hw.product',
                  'serialnumber': 'hw.serialno'}
        for key, oid in hwdata.items():
            value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
            # OpenBSD prints this suffix for OIDs the hardware lacks
            if not value.endswith(' value is not available'):
                grains[key] = value
    elif osdata['kernel'] == 'NetBSD':
        sysctl = salt.utils.which('sysctl')
        nbsd_hwdata = {
            'biosversion': 'machdep.dmi.board-version',
            'manufacturer': 'machdep.dmi.system-vendor',
            'serialnumber': 'machdep.dmi.system-serial',
            'productname': 'machdep.dmi.system-product',
            'biosreleasedate': 'machdep.dmi.bios-date',
        }
        for key, oid in nbsd_hwdata.items():
            result = __salt__['cmd.run_all']('{0} -n {1}'.format(sysctl, oid))
            # Only keep grains whose sysctl lookup succeeded
            if result['retcode'] == 0:
                grains[key] = result['stdout']
    return grains
def _smartos_zone_data():
    '''
    Return useful information from a SmartOS zone.

    Provides:
        pkgsrcversion
        imageversion
    '''
    grains = {}
    pkgsrc_re = re.compile('^release:\\s(.+)')
    image_re = re.compile('Image:\\s(.+)')
    if os.path.isfile('/etc/pkgsrc_version'):
        with salt.utils.fopen('/etc/pkgsrc_version', 'r') as fp_:
            for line in fp_:
                found = pkgsrc_re.match(line)
                if found:
                    grains['pkgsrcversion'] = found.group(1)
    if os.path.isfile('/etc/product'):
        with salt.utils.fopen('/etc/product', 'r') as fp_:
            for line in fp_:
                found = image_re.match(line)
                if found:
                    grains['imageversion'] = found.group(1)
    # Make sure both grains always exist
    grains.setdefault('pkgsrcversion', 'Unknown')
    grains.setdefault('imageversion', 'Unknown')
    return grains
def get_server_id():
    '''
    Provides an integer based on the FQDN of a machine.
    Useful as server-id in MySQL replication or anywhere else you'll need an ID like this.
    '''
    # Provides:
    #   server_id
    #
    # NOTE: the original implementation used the builtin hash(), which is
    # randomized per process on Python 3 (PYTHONHASHSEED), so the "stable"
    # server id changed on every minion restart.  zlib.crc32 is deterministic
    # across processes and platforms.
    import zlib
    minion_id = __opts__.get('id', '')
    return {'server_id': abs(zlib.crc32(minion_id.encode('utf-8')) % (2 ** 31))}
def get_master():
    '''
    Provides the minion with the name of its master.
    This is useful in states to target other services running on the master.
    '''
    # Provides:
    #   master
    master_name = __opts__.get('master', '')
    return {'master': master_name}
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
victorywang80/Maintenance
|
saltstack/src/salt/grains/core.py
|
Python
|
apache-2.0
| 47,280
|
"""
Django settings for snippod boilerplate project.
This is a base starter for snippod.
For more information on this file, see
https://github.com/shalomeir/snippod-boilerplate
"""
from snippod_boilerplate.settings.common import *
# from snippod_boilerplate.settings.config_dev import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$6(x*g_2g9l_*g8peb-@anl5^*8q!1w)k&e&2!i)t6$s8kia93'
# SECURITY WARNING: don't run with debug turned on in production!
# os.environ.get() returns a *string* whenever the variable is set, and any
# non-empty string (including "False") is truthy.  The original
# `os.environ.get('DEBUG', True)` therefore could never disable debug via
# the environment; parse the value explicitly instead.
DEBUG = os.environ.get('DEBUG', 'True').lower() in ('true', '1', 'yes')
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS += (
    'debug_toolbar',
)
# MIDDLEWARE_CLASSES += (
# )
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
DATABASE_OPTIONS = {'charset': 'utf8'}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    # os.path.join(BASE_DIR, 'snippod_webapp/.tmp'),  # grunt serve
    os.path.join(BASE_DIR, 'snippod_webapp/dist/client'),  # grunt
    # os.path.join(BASE_DIR, 'static'),
)
# Same string-truthiness fix as DEBUG above; default stays False.
COMPRESS_ENABLED = os.environ.get('COMPRESS_ENABLED', 'False').lower() in ('true', '1', 'yes')
#MEDIA FILE (user uploaded files)
# TEMPLATE_DIRS = (
#     os.path.join(BASE_DIR, 'djangoapps/templates'),
# )
|
shalomeir/snippod-boilerplate
|
snippod_boilerplate/settings/dev.py
|
Python
|
mit
| 1,919
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-10-01 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``catalogs_to_transmit`` to the Cornerstone configuration model
    and to its history (django-simple-history) table."""
    dependencies = [
        ('cornerstone', '0005_auto_20190925_0730'),
    ]
    operations = [
        migrations.AddField(
            model_name='cornerstoneenterprisecustomerconfiguration',
            name='catalogs_to_transmit',
            field=models.TextField(blank=True, help_text='A comma-separated list of catalog UUIDs to transmit.', null=True),
        ),
        migrations.AddField(
            model_name='historicalcornerstoneenterprisecustomerconfiguration',
            name='catalogs_to_transmit',
            field=models.TextField(blank=True, help_text='A comma-separated list of catalog UUIDs to transmit.', null=True),
        ),
    ]
|
edx/edx-enterprise
|
integrated_channels/cornerstone/migrations/0006_auto_20191001_0742.py
|
Python
|
agpl-3.0
| 824
|
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import shlex
import threading
import time
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _LE, _LW
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
LOG = logging.getLogger(__name__)
# Environment prefix prepended to every SNM2 CLI invocation.
SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm '
            'LD_LIBRARY_PATH=/usr/stonavm/lib '
            'STONAVM_RSP_PASS=on STONAVM_ACT=on')
# Host-group and HLUN limits of SNM2-managed arrays.
MAX_HOSTGROUPS = 127
MAX_HOSTGROUPS_ISCSI = 254
MAX_HLUN = 2047
# Per-unit lock file prefix; the unit name is appended per instance.
EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_'
# Retry/timeout tuning for SNM2 command execution (seconds).
EXEC_TIMEOUT = 10
EXEC_INTERVAL = 1
CHAP_TIMEOUT = 5
# Pair status code for a paired volume.
PAIRED = 12
# Sentinel LDEV used when collecting used HLUNs without matching an LDEV.
DUMMY_LU = -1
class HBSDSNM2(basic_lib.HBSDBasicLib):
def __init__(self, conf):
super(HBSDSNM2, self).__init__(conf=conf)
self.unit_name = conf.hitachi_unit_name
self.hsnm_lock = threading.Lock()
self.hsnm_lock_file = ('%s%s'
% (EXEC_LOCK_PATH_BASE, self.unit_name))
copy_speed = conf.hitachi_copy_speed
if copy_speed <= 2:
self.pace = 'slow'
elif copy_speed == 3:
self.pace = 'normal'
else:
self.pace = 'prior'
    def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start):
        """One retry-loop iteration: run the SNM2 command under the unit
        lock and stop the loop (LoopingCallDone) on success, on `noretry`,
        on timeout, or on an error code known to be non-transient."""
        lock = basic_lib.get_process_lock(self.hsnm_lock_file)
        with self.hsnm_lock, lock:
            ret, stdout, stderr = self.exec_command('env', args=args,
                                                    printflag=printflag)
        if not ret or noretry:
            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
        if time.time() - start >= timeout:
            LOG.error(_LE("snm2 command timeout."))
            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
        # DMExxx codes below are fatal array-side errors; retrying is useless.
        if (re.search('DMEC002047', stderr)
                or re.search('DMEC002048', stderr)
                or re.search('DMED09000A', stderr)
                or re.search('DMED090026', stderr)
                or re.search('DMED0E002B', stderr)
                or re.search('DMER03006A', stderr)
                or re.search('DMER030080', stderr)
                or re.search('DMER0300B8', stderr)
                or re.search('DMER0800CF', stderr)
                or re.search('DMER0800D[0-6D]', stderr)
                or re.search('DMES052602', stderr)):
            LOG.error(_LE("Unexpected error occurs in snm2."))
            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
    def exec_hsnm(self, command, args, printflag=True, noretry=False,
                  timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL):
        """Run an SNM2 CLI *command* with retries; returns (ret, out, err)."""
        args = '%s %s %s' % (SNM2_ENV, command, args)
        loop = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_exec_hsnm, args, printflag,
            noretry, timeout, time.time())
        return loop.start(interval=interval).wait()
    def _execute_with_exception(self, cmd, args, **kwargs):
        """Run an SNM2 command, raising HBSDError on a nonzero exit."""
        ret, stdout, stderr = self.exec_hsnm(cmd, args, **kwargs)
        if ret:
            cmds = '%(cmd)s %(args)s' % {'cmd': cmd, 'args': args}
            msg = basic_lib.output_err(
                600, cmd=cmds, ret=ret, out=stdout, err=stderr)
            raise exception.HBSDError(data=msg)
        return ret, stdout, stderr
    def _execute_and_return_stdout(self, cmd, args, **kwargs):
        """Run an SNM2 command and return only its stdout text."""
        result = self._execute_with_exception(cmd, args, **kwargs)
        return result[1]
def get_comm_version(self):
ret, stdout, stderr = self.exec_hsnm('auman', '-help')
m = re.search('Version (\d+).(\d+)', stdout)
if not m:
msg = basic_lib.output_err(
600, cmd='auman', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return '%s.%s' % (m.group(1), m.group(2))
    def add_used_hlun(self, command, port, gid, used_list, ldev):
        """Append HLUNs already mapped on (port, gid) to *used_list*.

        Returns the HLUN if *ldev* is already mapped there, else None.
        Mutates *used_list* in place.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm(command,
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd=command, ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        lines = stdout.splitlines()
        # Skip the two header lines of the CLI listing.
        for line in lines[2:]:
            line = shlex.split(line)
            if not line:
                continue
            if line[0] == port and int(line[1][0:3]) == gid:
                if int(line[2]) not in used_list:
                    used_list.append(int(line[2]))
                if int(line[3]) == ldev:
                    hlu = int(line[2])
                    LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
                                    '(hlun: %(hlu)d)'),
                                {'ldev': ldev, 'hlu': hlu})
                    return hlu
        return None
    def _get_lu(self, lu=None):
        """Return 'auluref' output for one LU, or for all LUs when lu is None."""
        # When 'lu' is 0, it should be true. So, it cannot remove 'is None'.
        if lu is None:
            args = '-unit %s' % self.unit_name
        else:
            args = '-unit %s -lu %s' % (self.unit_name, lu)
        return self._execute_and_return_stdout('auluref', args)
    def get_unused_ldev(self, ldev_range):
        """Return the first free LDEV number within [start, end].

        Raises HBSDError(648) when the range is exhausted.
        """
        start = ldev_range[0]
        end = ldev_range[1]
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='auluref', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        free_ldev = start
        lines = stdout.splitlines()
        found = False
        # The listing is sorted by LDEV number; walk it looking for a gap.
        for line in lines[2:]:
            line = shlex.split(line)
            if not line:
                continue
            ldev_num = int(line[0])
            if free_ldev > ldev_num:
                continue
            if free_ldev == ldev_num:
                free_ldev += 1
            else:
                found = True
                break
            if free_ldev > end:
                break
        else:
            # Every listed LDEV was below the candidate: the candidate is free.
            found = True
        if not found:
            msg = basic_lib.output_err(648, resource='LDEV')
            raise exception.HBSDError(message=msg)
        return free_ldev
    def get_hgname_gid(self, port, host_grp_name):
        """Return the GID of host group *host_grp_name* on *port*, or None."""
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('auhgdef',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        lines = stdout.splitlines()
        is_target_port = False
        for line in lines:
            line = shlex.split(line)
            if not line:
                continue
            if line[0] == 'Port' and line[1] == port:
                is_target_port = True
                continue
            if is_target_port:
                if line[0] == 'Port':
                    # Next port section started: the group is not on *port*.
                    break
                if not line[0].isdigit():
                    continue
                gid = int(line[0])
                if line[1] == host_grp_name:
                    return gid
        return None
    def get_unused_gid(self, group_range, port):
        """Return the first free host-group GID on *port* within the range.

        Raises HBSDError(648) when no GID is available.
        """
        start = group_range[0]
        end = group_range[1]
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('auhgdef',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        lines = stdout.splitlines()
        is_target_port = False
        free_gid = start
        found = False
        for line in lines:
            line = shlex.split(line)
            if not line:
                continue
            if line[0] == 'Port' and line[1] == port:
                is_target_port = True
                continue
            if is_target_port:
                if line[0] == 'Port':
                    # Reached the next port's section; candidate GID is free.
                    found = True
                    break
                if not line[0].isdigit():
                    continue
                gid = int(line[0])
                if free_gid > gid:
                    continue
                if free_gid == gid:
                    free_gid += 1
                else:
                    found = True
                    break
                if free_gid > end or free_gid > MAX_HOSTGROUPS:
                    break
        else:
            found = True
        if not found:
            msg = basic_lib.output_err(648, resource='GID')
            raise exception.HBSDError(message=msg)
        return free_gid
    def comm_set_target_wwns(self, target_ports):
        """Return {port: wwn} parsed from 'aufibre1 -refer'.

        When *target_ports* is non-empty, only those ports are included.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('aufibre1',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='aufibre1', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        lines = stdout.splitlines()
        target_wwns = {}
        for line in lines[3:]:
            if re.match('Transfer', line):
                # End of the port table.
                break
            line = shlex.split(line)
            if len(line) < 4:
                continue
            # Port name is controller number + port letter, e.g. '0A'.
            port = '%s%s' % (line[0], line[1])
            if target_ports:
                if port in target_ports:
                    target_wwns[port] = line[3]
            else:
                target_wwns[port] = line[3]
        LOG.debug('target wwns: %s', target_wwns)
        return target_wwns
    def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
        """Append host groups on *port* whose assigned WWN matches one of the
        compiled patterns in *wwns*; mutates *hostgroups* in place.

        NOTE(review): matching relies on fixed column offsets (38:54) of the
        'auhgwwn' listing -- assumed stable across SNM2 versions; confirm.
        """
        for pt in wwns:
            for line in buf[port]['assigned']:
                hgname = shlex.split(line[38:])[1][4:]
                if not re.match(basic_lib.NAME_PREFIX, hgname):
                    continue
                if pt.search(line[38:54]):
                    wwn = line[38:54]
                    gid = int(shlex.split(line[38:])[1][0:3])
                    is_detected = None
                    if login:
                        for line in buf[port]['detected']:
                            if pt.search(line[38:54]):
                                is_detected = True
                                break
                        else:
                            is_detected = False
                    hostgroups.append({'port': six.text_type(port), 'gid': gid,
                                       'initiator_wwn': wwn,
                                       'detected': is_detected})
    def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
        """Populate *hgs* with host groups matching the initiator *wwns*.

        Returns the list of ports that have WWN security enabled.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('auhgwwn',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        security_ports = []
        patterns = []
        for wwn in wwns:
            pt = re.compile(wwn, re.IGNORECASE)
            patterns.append(pt)
        lines = stdout.splitlines()
        # buf maps port -> the three WWN sections of the listing.
        buf = {}
        _buffer = []
        port = None
        security = None
        for line in lines:
            if re.match('Port', line):
                port = shlex.split(line)[1]
                if target_ports and port not in target_ports:
                    port = None
                else:
                    security = True if shlex.split(line)[5] == 'ON' else False
                    buf[port] = {'detected': [], 'assigned': [],
                                 'assignable': []}
                    if security:
                        security_ports.append(port)
                continue
            if port and security:
                # Switch the destination list when a section header appears.
                if re.search('Detected WWN', line):
                    _buffer = buf[port]['detected']
                    continue
                elif re.search('Assigned WWN', line):
                    _buffer = buf[port]['assigned']
                    continue
                elif re.search('Assignable WWN', line):
                    _buffer = buf[port]['assignable']
                    continue
                _buffer.append(line)
        hostgroups = []
        for port in buf.keys():
            self.get_hostgroup_from_wwns(
                hostgroups, port, patterns, buf, login)
        for hostgroup in hostgroups:
            hgs.append(hostgroup)
        return security_ports
    def comm_delete_lun_core(self, command, hostgroups, lun):
        """Unmap LDEV *lun* from every host group in *hostgroups*.

        Raises HBSDNotFound when the LDEV is mapped on none of them.
        """
        unit = self.unit_name
        no_lun_cnt = 0
        deleted_hostgroups = []
        for hostgroup in hostgroups:
            LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup)
            port = hostgroup['port']
            gid = hostgroup['gid']
            ctl_no = port[0]
            port_no = port[1]
            is_deleted = False
            for deleted in deleted_hostgroups:
                if port == deleted['port'] and gid == deleted['gid']:
                    is_deleted = True
            if is_deleted:
                continue
            ret, stdout, stderr = self.exec_hsnm(command,
                                                 '-unit %s -refer' % unit)
            if ret:
                msg = basic_lib.output_err(
                    600, cmd=command, ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
            lines = stdout.splitlines()
            for line in lines[2:]:
                line = shlex.split(line)
                if not line:
                    continue
                if (line[0] == port and int(line[1][0:3]) == gid
                        and int(line[3]) == lun):
                    hlu = int(line[2])
                    break
            else:
                # LDEV not mapped on this host group.
                no_lun_cnt += 1
                if no_lun_cnt == len(hostgroups):
                    raise exception.HBSDNotFound
                else:
                    continue
            opt = '-unit %s -rm %s %s %d %d %d' % (unit, ctl_no, port_no,
                                                   gid, hlu, lun)
            ret, stdout, stderr = self.exec_hsnm(command, opt)
            if ret:
                msg = basic_lib.output_err(
                    600, cmd=command, ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
            deleted_hostgroups.append({'port': port, 'gid': gid})
            LOG.debug('comm_delete_lun is over (%d)', lun)
    def comm_delete_lun(self, hostgroups, ldev):
        """Unmap a fibre-channel LDEV (auhgmap)."""
        self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
    def comm_delete_lun_iscsi(self, hostgroups, ldev):
        """Unmap an iSCSI LDEV (autargetmap)."""
        self.comm_delete_lun_core('autargetmap', hostgroups, ldev)
    def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
        """Create LDEV *ldev* of *capacity* GB (V-VOL or DP volume).

        Raises HBSDNotFound on known "object missing/busy" error codes and
        HBSDCmdError on any other failure.
        """
        unit = self.unit_name
        if is_vvol:
            command = 'aureplicationvvol'
            opt = ('-unit %s -add -lu %d -size %dg'
                   % (unit, ldev, capacity))
        else:
            command = 'auluadd'
            opt = ('-unit %s -lu %d -dppoolno %d -size %dg'
                   % (unit, ldev, pool_id, capacity))
        ret, stdout, stderr = self.exec_hsnm(command, opt)
        if ret:
            if (re.search('DMEC002047', stderr)
                    or re.search('DMES052602', stderr)
                    or re.search('DMED09000A', stderr)):
                raise exception.HBSDNotFound
            else:
                msg = basic_lib.output_err(
                    600, cmd=command, ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
    def comm_add_hostgrp(self, port, gid, host_grp_name):
        """Create host group *gid*/*host_grp_name* on *port*."""
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no,
                                                         port_no, gid,
                                                         host_grp_name)
        ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
        if ret:
            raise exception.HBSDNotFound
    def comm_del_hostgrp(self, port, gid, host_grp_name):
        """Delete host group *host_grp_name* from *port*."""
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no,
                                                host_grp_name)
        ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
    def comm_add_hbawwn(self, port, gid, wwn):
        """Register initiator *wwn* on host group *gid*; falls back from
        '-set -permhg' to '-assign -permhg' when the first form fails."""
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no,
                                                          port_no, wwn, gid)
        ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
        if ret:
            opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no,
                                                                 port_no, wwn,
                                                                 gid)
            ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
            if ret:
                msg = basic_lib.output_err(
                    600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
    def comm_add_lun(self, command, hostgroups, ldev, is_once=False):
        """Map LDEV *ldev* to the given host groups at a common HLUN.

        Reuses an existing HLUN when the LDEV is already mapped somewhere;
        otherwise picks the lowest free HLUN.  With *is_once* only the first
        successful mapping is made.
        """
        unit = self.unit_name
        tmp_hostgroups = hostgroups[:]
        used_list = []
        is_ok = False
        hlu = None
        old_hlu = None
        for hostgroup in hostgroups:
            port = hostgroup['port']
            gid = hostgroup['gid']
            hlu = self.add_used_hlun(command, port, gid, used_list, ldev)
            # When 'hlu' or 'old_hlu' is 0, it should be true.
            # So, it cannot remove 'is not None'.
            if hlu is not None:
                if old_hlu is not None and old_hlu != hlu:
                    # The LDEV is mapped at two different HLUNs: inconsistent.
                    msg = basic_lib.output_err(648, resource='LUN (HLUN)')
                    raise exception.HBSDError(message=msg)
                is_ok = True
                hostgroup['lun'] = hlu
                tmp_hostgroups.remove(hostgroup)
                old_hlu = hlu
            else:
                hlu = old_hlu
        if not used_list:
            hlu = 0
        elif hlu is None:
            # Pick the lowest HLUN not in use on any of the host groups.
            for i in range(MAX_HLUN + 1):
                if i not in used_list:
                    hlu = i
                    break
            else:
                raise exception.HBSDNotFound
        ret = 0
        stdout = None
        stderr = None
        invalid_hgs_str = None
        for hostgroup in tmp_hostgroups:
            port = hostgroup['port']
            gid = hostgroup['gid']
            ctl_no = port[0]
            port_no = port[1]
            if not hostgroup['detected']:
                # Track undetected host groups for the final error message.
                if invalid_hgs_str:
                    invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
                                                     port, gid)
                else:
                    invalid_hgs_str = '%s:%d' % (port, gid)
                continue
            opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no,
                                                    gid, hlu, ldev)
            ret, stdout, stderr = self.exec_hsnm(command, opt)
            if ret == 0:
                is_ok = True
                hostgroup['lun'] = hlu
                if is_once:
                    break
            else:
                LOG.warning(basic_lib.set_msg(
                    314, ldev=ldev, lun=hlu, port=port, id=gid))
        if not is_ok:
            if stderr:
                msg = basic_lib.output_err(
                    600, cmd=command, ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
            else:
                msg = basic_lib.output_err(659, gid=invalid_hgs_str)
                raise exception.HBSDError(message=msg)
    def comm_delete_ldev(self, ldev, is_vvol):
        """Delete LDEV *ldev*; raises HBSDNotFound when it does not exist."""
        unit = self.unit_name
        if is_vvol:
            command = 'aureplicationvvol'
            opt = '-unit %s -rm -lu %d' % (unit, ldev)
        else:
            command = 'auludel'
            opt = '-unit %s -lu %d -f' % (unit, ldev)
        # Deletion can be slow: use a longer timeout than the default.
        ret, stdout, stderr = self.exec_hsnm(command, opt,
                                             timeout=30, interval=3)
        if ret:
            if (re.search('DMEC002048', stderr)
                    or re.search('DMED090026', stderr)):
                raise exception.HBSDNotFound
            msg = basic_lib.output_err(
                600, cmd=command, ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        return ret
    def comm_extend_ldev(self, ldev, old_size, new_size):
        """Grow LDEV *ldev* to *new_size* GB (old_size is unused here)."""
        unit = self.unit_name
        command = 'auluchgsize'
        options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size)
        ret, stdout, stderr = self.exec_hsnm(command, options)
        if ret:
            msg = basic_lib.output_err(
                600, cmd=command, ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
    def delete_chap_user(self, port):
        """Remove the configured CHAP user from *port*; returns (ret, out, err)."""
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        auth_username = self.conf.hitachi_auth_user
        opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no,
                                               auth_username)
        return self.exec_hsnm('auchapuser', opt)
    def _wait_for_add_chap_user(self, cmd, auth_username,
                                auth_password, start):
        """One retry iteration of the interactive 'auchapuser -add' dialog.

        Drives the password prompts via pexpect; stops the retry loop with
        LoopingCallDone(True) on success, raises HBSDError(642) on timeout.
        """
        # Don't move 'import pexpect' to the beginning of the file so that
        # a tempest can work.
        import pexpect
        lock = basic_lib.get_process_lock(self.hsnm_lock_file)
        with self.hsnm_lock, lock:
            try:
                child = pexpect.spawn(cmd)
                child.expect('Secret: ', timeout=CHAP_TIMEOUT)
                child.sendline(auth_password)
                child.expect('Re-enter Secret: ',
                             timeout=CHAP_TIMEOUT)
                child.sendline(auth_password)
                child.expect('The CHAP user information has '
                             'been added successfully.',
                             timeout=CHAP_TIMEOUT)
            except Exception:
                if time.time() - start >= EXEC_TIMEOUT:
                    msg = basic_lib.output_err(642, user=auth_username)
                    raise exception.HBSDError(message=msg)
            else:
                raise loopingcall.LoopingCallDone(True)
    def set_chap_authention(self, port, gid):
        """Ensure the CHAP user exists and is assigned to target *gid*.

        Returns True when the user was newly added by this call.
        """
        ctl_no = port[0]
        port_no = port[1]
        unit = self.unit_name
        auth_username = self.conf.hitachi_auth_user
        auth_password = self.conf.hitachi_auth_password
        add_chap_user = self.conf.hitachi_add_chap_user
        assign_flag = True
        added_flag = False
        opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no,
                                                  auth_username)
        ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True)
        if ret:
            # User does not exist yet; create it if configuration allows.
            if not add_chap_user:
                msg = basic_lib.output_err(643, user=auth_username)
                raise exception.HBSDError(message=msg)
            root_helper = utils.get_root_helper()
            cmd = ('%s env %s auchapuser -unit %s -add %s %s '
                   '-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no,
                                         port_no, gid, auth_username))
            LOG.debug('Add CHAP user')
            loop = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_add_chap_user, cmd,
                auth_username, auth_password, time.time())
            added_flag = loop.start(interval=EXEC_INTERVAL).wait()
        else:
            # User exists: check whether it is already assigned to this gid.
            lines = stdout.splitlines()[4:]
            for line in lines:
                if int(shlex.split(line)[0][0:3]) == gid:
                    assign_flag = False
                    break
        if assign_flag:
            opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no,
                                                               port_no, gid,
                                                               auth_username)
            ret, stdout, stderr = self.exec_hsnm('auchapuser', opt)
            if ret:
                if added_flag:
                    # Roll back the user we just created.
                    _ret, _stdout, _stderr = self.delete_chap_user(port)
                    if _ret:
                        LOG.warning(basic_lib.set_msg(
                            303, user=auth_username))
                msg = basic_lib.output_err(
                    600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
                raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        return added_flag
    def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn):
        """Create iSCSI target *gid* with the given alias/IQN on *port*."""
        auth_method = self.conf.hitachi_auth_method
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        if auth_method:
            auth_arg = '-authmethod %s -mutual disable' % auth_method
        else:
            auth_arg = '-authmethod None'
        opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid)
        opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn,
                                              auth_arg)
        ret, stdout, stderr = self.exec_hsnm('autargetdef', opt)
        if ret:
            raise exception.HBSDNotFound
    def delete_iscsi_target(self, port, _target_no, target_alias):
        """Remove the iSCSI target *target_alias*; returns (ret, out, err)."""
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no,
                                                 target_alias)
        return self.exec_hsnm('autargetdef', opt)
    def comm_set_hostgrp_reportportal(self, port, target_alias):
        """Enable ReportFullPortalList on the iSCSI target *target_alias*."""
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no,
                                                  target_alias)
        opt = '%s -ReportFullPortalList enable' % opt
        ret, stdout, stderr = self.exec_hsnm('autargetopt', opt)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetopt', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
    def comm_add_initiator(self, port, gid, host_iqn):
        """Register initiator IQN *host_iqn* on iSCSI target *gid*."""
        unit = self.unit_name
        ctl_no = port[0]
        port_no = port[1]
        opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no,
                                                         port_no, gid,
                                                         host_iqn)
        ret, stdout, stderr = self.exec_hsnm('autargetini', opt)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
    def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports):
        """Populate *hgs* with iSCSI targets that contain *host_iqn*.

        Returns the list of ports that have security enabled.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('autargetini',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        security_ports = []
        lines = stdout.splitlines()
        hostgroups = []
        security = True
        for line in lines:
            if not shlex.split(line):
                continue
            if re.match('Port', line):
                line = shlex.split(line)
                port = line[1]
                security = True if line[4] == 'ON' else False
                continue
            if target_ports and port not in target_ports:
                continue
            if security:
                # NOTE(review): column offset 72 is assumed to be the start
                # of the IQN list in the 'autargetini' output -- confirm
                # against the SNM2 CLI format.
                if (host_iqn in shlex.split(line[72:]) and
                        re.match(basic_lib.NAME_PREFIX,
                                 shlex.split(line)[0][4:])):
                    gid = int(shlex.split(line)[0][0:3])
                    hostgroups.append(
                        {'port': port, 'gid': gid, 'detected': True})
                    LOG.debug('Find port=%(port)s gid=%(gid)d',
                              {'port': port, 'gid': gid})
                    if port not in security_ports:
                        security_ports.append(port)
        for hostgroup in hostgroups:
            hgs.append(hostgroup)
        return security_ports
    def comm_get_iscsi_ip(self, port):
        """Return (ip_addr, ip_port) of the iSCSI portal on *port*.

        Raises HBSDError(651) when the whole listing is scanned without an
        IPv4 address for the port (for/else on the loop below).
        NOTE(review): if 'IPv4 Address' appears before 'Port Number' in the
        CLI output, ip_port would be unbound at the return -- the observed
        CLI format apparently lists the port number first; confirm.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('auiscsi',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='auiscsi', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        lines = stdout.splitlines()
        is_target_port = False
        for line in lines:
            line_array = shlex.split(line)
            if not line_array:
                continue
            if line_array[0] == 'Port' and line_array[1] != 'Number':
                if line_array[1] == port:
                    is_target_port = True
                else:
                    is_target_port = False
                continue
            if is_target_port and re.search('IPv4 Address', line):
                ip_addr = shlex.split(line)[3]
                break
            if is_target_port and re.search('Port Number', line):
                ip_port = shlex.split(line)[3]
        else:
            msg = basic_lib.output_err(651)
            raise exception.HBSDError(message=msg)
        return ip_addr, ip_port
    def comm_get_target_iqn(self, port, gid):
        """Return the target IQN of iSCSI target *gid* on *port*.

        Raises HBSDError(650) when not found (for/else on the scan loop).
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('autargetdef',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        is_target_host = False
        tmp_port = None
        lines = stdout.splitlines()
        for line in lines:
            line = shlex.split(line)
            if not line:
                continue
            if line[0] == "Port":
                tmp_port = line[1]
                continue
            if port != tmp_port:
                continue
            gid_tmp = line[0][0:3]
            if gid_tmp.isdigit() and int(gid_tmp) == gid:
                is_target_host = True
                continue
            if is_target_host and line[0] == "iSCSI":
                target_iqn = line[3]
                break
        else:
            msg = basic_lib.output_err(650, resource='IQN')
            raise exception.HBSDError(message=msg)
        return target_iqn
    def get_unused_gid_iscsi(self, group_range, port):
        """Return the first free iSCSI target GID on *port* in the range.

        The range is clamped to MAX_HOSTGROUPS_ISCSI; raises HBSDError(648)
        when exhausted.
        """
        start = group_range[0]
        end = min(group_range[1], MAX_HOSTGROUPS_ISCSI)
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('autargetdef',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        used_list = []
        tmp_port = None
        lines = stdout.splitlines()
        for line in lines:
            line = shlex.split(line)
            if not line:
                continue
            if line[0] == "Port":
                tmp_port = line[1]
                continue
            if port != tmp_port:
                continue
            if line[0][0:3].isdigit():
                gid = int(line[0][0:3])
                if start <= gid <= end:
                    used_list.append(gid)
        if not used_list:
            return start
        for gid in range(start, end + 1):
            if gid not in used_list:
                break
        else:
            msg = basic_lib.output_err(648, resource='GID')
            raise exception.HBSDError(message=msg)
        return gid
    def get_gid_from_targetiqn(self, target_iqn, target_alias, port):
        """Return the GID whose target IQN matches *target_iqn* on *port*.

        Raises HBSDError(641) when the alias and IQN are inconsistent
        (alias exists with a different IQN, or IQN found under a foreign
        alias).  Returns None when neither is present.
        """
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm('autargetdef',
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        gid = None
        tmp_port = None
        found_alias_full = False
        found_alias_part = False
        lines = stdout.splitlines()
        for line in lines:
            line = shlex.split(line)
            if not line:
                continue
            if line[0] == "Port":
                tmp_port = line[1]
                continue
            if port != tmp_port:
                continue
            if line[0][0:3].isdigit():
                tmp_gid = int(line[0][0:3])
                if re.match(basic_lib.NAME_PREFIX, line[0][4:]):
                    found_alias_part = True
                    if line[0][4:] == target_alias:
                        found_alias_full = True
                continue
            if line[0] == "iSCSI":
                if line[3] == target_iqn:
                    gid = tmp_gid
                    break
                else:
                    found_alias_part = False
        if found_alias_full and gid is None:
            msg = basic_lib.output_err(641)
            raise exception.HBSDError(message=msg)
        # When 'gid' is 0, it should be true.
        # So, it cannot remove 'is not None'.
        if not found_alias_part and gid is not None:
            msg = basic_lib.output_err(641)
            raise exception.HBSDError(message=msg)
        return gid
def comm_get_dp_pool(self, pool_id):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('audppool',
'-unit %s -refer -g' % unit,
printflag=False)
if ret:
msg = basic_lib.output_err(
600, cmd='audppool', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[2:]:
tc_cc = re.search('\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line)
pool_tmp = re.match('\s*\d+', line)
if (pool_tmp and tc_cc
and int(pool_tmp.group(0)) == pool_id):
total_gb = int(float(tc_cc.group(1)))
free_gb = total_gb - int(float(tc_cc.group(2)))
return total_gb, free_gb
msg = basic_lib.output_err(640, pool_id=pool_id)
raise exception.HBSDError(message=msg)
    def is_detected(self, port, wwn):
        """Return True if initiator *wwn* is logged in on *port*."""
        hgs = []
        self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True)
        return hgs[0]['detected']
    def pairoperate(self, opr, pvol, svol, is_vvol, args=None):
        """Run 'aureplicationlocal -<opr>' on the pvol/svol pair.

        '-ss' (SnapShot) is used for V-VOLs, '-si' (ShadowImage) otherwise.
        """
        unit = self.unit_name
        method = '-ss' if is_vvol else '-si'
        opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method,
                                                     pvol, svol)
        if args:
            opt = '%s %s' % (opt, args)
        ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt)
        if ret:
            opt = '%s %s' % ('aureplicationlocal', opt)
            msg = basic_lib.output_err(
                600, cmd=opt, ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
    def comm_create_pair(self, pvol, svol, is_vvol):
        """Create a copy pair (ShadowImage or SnapShot) from pvol to svol.

        Converts "busy" CLI error codes into HBSDBusy so callers can retry.
        """
        if not is_vvol:
            args = '-compsplit -pace %s' % self.pace
            method = basic_lib.FULL
        else:
            pool = self.conf.hitachi_thin_pool_id
            args = ('-localrepdppoolno %d -localmngdppoolno %d '
                    '-compsplit -pace %s' % (pool, pool, self.pace))
            method = basic_lib.THIN
        try:
            self.pairoperate('create', pvol, svol, is_vvol, args=args)
        except exception.HBSDCmdError as ex:
            if (re.search('DMER0300B8', ex.stderr)
                    or re.search('DMER0800CF', ex.stderr)
                    or re.search('DMER0800D[0-6D]', ex.stderr)
                    or re.search('DMER03006A', ex.stderr)
                    or re.search('DMER030080', ex.stderr)):
                msg = basic_lib.output_err(615, copy_method=method, pvol=pvol)
                raise exception.HBSDBusy(message=msg)
            else:
                raise
    def _comm_pairevtwait(self, pvol, svol, is_vvol):
        """Query the pair status once (non-blocking); returns the CLI status."""
        unit = self.unit_name
        if not is_vvol:
            pairname = 'SI_LU%04d_LU%04d' % (pvol, svol)
            method = '-si'
        else:
            pairname = 'SS_LU%04d_LU%04d' % (pvol, svol)
            method = '-ss'
        opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' %
               (unit, method, pairname))
        ret, stdout, stderr = self.exec_hsnm('aureplicationmon',
                                             opt, noretry=True)
        return ret
    def _wait_for_pair_status(self, pvol, svol, is_vvol,
                              status, timeout, start):
        """One polling iteration: stop the loop when the pair reaches one of
        *status*, raise HBSDError(637) on timeout."""
        if self._comm_pairevtwait(pvol, svol, is_vvol) in status:
            raise loopingcall.LoopingCallDone()
        if time.time() - start >= timeout:
            msg = basic_lib.output_err(
                637, method='_wait_for_pair_status', timeout=timeout)
            raise exception.HBSDError(message=msg)
    def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval):
        """Block until the pair reaches one of *status* or times out."""
        loop = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_pair_status, pvol, svol, is_vvol,
            status, timeout, time.time())
        loop.start(interval=interval).wait()
    def delete_pair(self, pvol, svol, is_vvol):
        """Dissolve the copy pair (set it to simplex)."""
        self.pairoperate('simplex', pvol, svol, is_vvol)
    def trans_status_hsnm2raid(self, str):
        """Translate an SNM2 status line into basic_lib.PSUS/PAIR (or None).

        NOTE(review): the parameter shadows the builtin ``str``; renaming it
        would change keyword-call compatibility, so it is left as-is.
        """
        status = None
        obj = re.search('Split\((.*)%\)', str)
        if obj:
            status = basic_lib.PSUS
        obj = re.search('Paired\((.*)%\)', str)
        if obj:
            status = basic_lib.PAIR
        return status
def get_paired_info(self, ldev, only_flag=False):
opt_base = '-unit %s -refer' % self.unit_name
if only_flag:
opt_base = '%s -ss' % opt_base
opt = '%s -pvol %d' % (opt_base, ldev)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
opt, noretry=True)
if ret == 0:
lines = stdout.splitlines()
pair_info = {'pvol': ldev, 'svol': []}
for line in lines[1:]:
status = self.trans_status_hsnm2raid(line)
if re.search('SnapShot', line[100:]):
is_vvol = True
else:
is_vvol = False
line = shlex.split(line)
if not line:
break
svol = int(line[2])
pair_info['svol'].append({'lun': svol,
'status': status,
'is_vvol': is_vvol})
return pair_info
opt = '%s -svol %d' % (opt_base, ldev)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
opt, noretry=True)
if ret == 1:
return {'pvol': None, 'svol': []}
lines = stdout.splitlines()
status = self.trans_status_hsnm2raid(lines[1])
if re.search('SnapShot', lines[1][100:]):
is_vvol = True
else:
is_vvol = False
line = shlex.split(lines[1])
pvol = int(line[1])
return {'pvol': pvol, 'svol': [{'lun': ldev,
'status': status,
'is_vvol': is_vvol}]}
    def create_lock_file(self):
        """Create the empty SNM2 lock file used to serialize CLI access."""
        basic_lib.create_empty_file(self.hsnm_lock_file)
def get_hostgroup_luns(self, port, gid):
list = []
self.add_used_hlun('auhgmap', port, gid, list, DUMMY_LU)
return list
    def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
        """Return the size in GiB of an existing LDEV to be managed.

        Validates that *existing_ref* names this storage unit and that the
        LDEV is a normal DP volume, unmapped, whose size is a whole number
        of gigabytes.

        :param ldev: logical device number
        :param existing_ref: manage-existing reference dict; must contain
            ``unit_name`` matching ``hitachi_unit_name``
        :raises exception.HBSDError: on any validation failure
        """
        param = 'unit_name'
        if param not in existing_ref:
            msg = basic_lib.output_err(700, param=param)
            raise exception.HBSDError(data=msg)
        storage = existing_ref.get(param)
        if storage != self.conf.hitachi_unit_name:
            msg = basic_lib.output_err(648, resource=param)
            raise exception.HBSDError(data=msg)
        try:
            stdout = self._get_lu(ldev)
        except exception.HBSDError:
            # Log a friendlier message but re-raise the original error.
            with excutils.save_and_reraise_exception():
                basic_lib.output_err(648, resource='LDEV')
        lines = stdout.splitlines()
        # Line 2 holds the LU record; the last column is the volume type.
        line = lines[2]
        splits = shlex.split(line)
        vol_type = splits[len(splits) - 1]
        if basic_lib.NORMAL_VOLUME_TYPE != vol_type:
            msg = basic_lib.output_err(702, ldev=ldev)
            raise exception.HBSDError(data=msg)
        dppool = splits[5]
        if 'N/A' == dppool:
            # Only DP-pool volumes can be managed.
            msg = basic_lib.output_err(702, ldev=ldev)
            raise exception.HBSDError(data=msg)
        # Hitachi storage calculates volume sizes in a block unit, 512 bytes.
        # So, units.Gi is divided by 512.
        size = int(splits[1])
        if size % (units.Gi / 512):
            msg = basic_lib.output_err(703, ldev=ldev)
            raise exception.HBSDError(data=msg)
        num_port = int(splits[len(splits) - 2])
        if num_port:
            # Refuse volumes that are still mapped to any port.
            msg = basic_lib.output_err(704, ldev=ldev)
            raise exception.HBSDError(data=msg)
        # NOTE(review): '/' is integer division under Python 2 here; on
        # Python 3 this expression would yield a float -- confirm before
        # porting.
        return size / (units.Gi / 512)
|
saeki-masaki/cinder
|
cinder/volume/drivers/hitachi/hbsd_snm2.py
|
Python
|
apache-2.0
| 43,687
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.data import conf
from lib.core.data import logger
from lib.core.dicts import DBMS_DICT
from lib.core.enums import DBMS
from lib.core.settings import MSSQL_ALIASES
from lib.core.settings import MYSQL_ALIASES
from lib.core.settings import ORACLE_ALIASES
from lib.core.settings import PGSQL_ALIASES
from lib.core.settings import SQLITE_ALIASES
from lib.core.settings import ACCESS_ALIASES
from lib.core.settings import FIREBIRD_ALIASES
from lib.core.settings import MAXDB_ALIASES
from lib.core.settings import SYBASE_ALIASES
from lib.core.settings import DB2_ALIASES
from lib.core.settings import HSQLDB_ALIASES
from lib.utils.sqlalchemy import SQLAlchemy
from plugins.dbms.mssqlserver import MSSQLServerMap
from plugins.dbms.mssqlserver.connector import Connector as MSSQLServerConn
from plugins.dbms.mysql import MySQLMap
from plugins.dbms.mysql.connector import Connector as MySQLConn
from plugins.dbms.oracle import OracleMap
from plugins.dbms.oracle.connector import Connector as OracleConn
from plugins.dbms.postgresql import PostgreSQLMap
from plugins.dbms.postgresql.connector import Connector as PostgreSQLConn
from plugins.dbms.sqlite import SQLiteMap
from plugins.dbms.sqlite.connector import Connector as SQLiteConn
from plugins.dbms.access import AccessMap
from plugins.dbms.access.connector import Connector as AccessConn
from plugins.dbms.firebird import FirebirdMap
from plugins.dbms.firebird.connector import Connector as FirebirdConn
from plugins.dbms.maxdb import MaxDBMap
from plugins.dbms.maxdb.connector import Connector as MaxDBConn
from plugins.dbms.sybase import SybaseMap
from plugins.dbms.sybase.connector import Connector as SybaseConn
from plugins.dbms.db2 import DB2Map
from plugins.dbms.db2.connector import Connector as DB2Conn
from plugins.dbms.hsqldb import HSQLDBMap
from plugins.dbms.hsqldb.connector import Connector as HSQLDBConn
def setHandler():
    """
    Detect which is the target web application back-end database
    management system.

    Tries each supported DBMS handler in turn; a previously identified
    DBMS (or one forced via conf.dbms) is probed first.  On success the
    matching handler and connector are stored on conf.
    """
    items = [
        (DBMS.MYSQL, MYSQL_ALIASES, MySQLMap, MySQLConn),
        (DBMS.ORACLE, ORACLE_ALIASES, OracleMap, OracleConn),
        (DBMS.PGSQL, PGSQL_ALIASES, PostgreSQLMap, PostgreSQLConn),
        (DBMS.MSSQL, MSSQL_ALIASES, MSSQLServerMap, MSSQLServerConn),
        (DBMS.SQLITE, SQLITE_ALIASES, SQLiteMap, SQLiteConn),
        (DBMS.ACCESS, ACCESS_ALIASES, AccessMap, AccessConn),
        (DBMS.FIREBIRD, FIREBIRD_ALIASES, FirebirdMap, FirebirdConn),
        (DBMS.MAXDB, MAXDB_ALIASES, MaxDBMap, MaxDBConn),
        (DBMS.SYBASE, SYBASE_ALIASES, SybaseMap, SybaseConn),
        (DBMS.DB2, DB2_ALIASES, DB2Map, DB2Conn),
        (DBMS.HSQLDB, HSQLDB_ALIASES, HSQLDBMap, HSQLDBConn),
    ]
    # If a DBMS has already been identified, move its entry to the front so
    # it is probed first.  next() with a default replaces the original
    # max()-over-None trick, which relied on Python 2's ordering of None
    # against tuples and breaks on Python 3.
    identified = (Backend.getIdentifiedDbms() or "").lower()
    _ = next((item for item in items if identified in item[1]), None)
    if _:
        items.remove(_)
        items.insert(0, _)
    for dbms, aliases, Handler, Connector in items:
        # Honor an explicitly forced DBMS (--dbms) by skipping the others.
        if conf.dbms and conf.dbms.lower() != dbms and conf.dbms.lower() not in aliases:
            debugMsg = "skipping test for %s" % dbms
            logger.debug(debugMsg)
            continue
        handler = Handler()
        conf.dbmsConnector = Connector()
        if conf.direct:
            logger.debug("forcing timeout to 10 seconds")
            conf.timeout = 10
            dialect = DBMS_DICT[dbms][3]
            if dialect:
                # Prefer SQLAlchemy when a dialect is available, falling
                # back to the native connector.
                sqlalchemy = SQLAlchemy(dialect=dialect)
                sqlalchemy.connect()
                if sqlalchemy.connector:
                    conf.dbmsConnector = sqlalchemy
                else:
                    try:
                        conf.dbmsConnector.connect()
                    except NameError:
                        pass
            else:
                conf.dbmsConnector.connect()
        if handler.checkDbms():
            conf.dbmsHandler = handler
            break
        else:
            conf.dbmsConnector = None
    # At this point back-end DBMS is correctly fingerprinted, no need
    # to enforce it anymore
    Backend.flushForcedDbms()
|
V11/volcano
|
server/sqlmap/lib/controller/handler.py
|
Python
|
mit
| 4,354
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 10:14:16 2019
@author: cwhanse
"""
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from numpy.testing import assert_allclose
from pvlib import iam as _iam
from conftest import needs_numpy_1_10, requires_scipy
@needs_numpy_1_10
def test_ashrae():
    """ashrae(): ndarray input, NaN pass-through, and the Series default b."""
    angles = np.array([-90., -67.5, -45., -22.5, 0., 22.5, 45., 67.5, 89., 90.,
                       np.nan])
    want = np.array([0, 0.9193437, 0.97928932, 0.99588039, 1., 0.99588039,
                     0.97928932, 0.9193437, 0, 0, np.nan])
    assert_allclose(_iam.ashrae(angles, .05), want, equal_nan=True)
    # Series round-trip relying on the default b parameter (0.05).
    assert_series_equal(_iam.ashrae(pd.Series(angles)), pd.Series(want))
@needs_numpy_1_10
def test_ashrae_scalar():
    """ashrae() accepts scalar angles and propagates a scalar NaN."""
    for angle, want in ((-45., 0.97928932), (np.nan, np.nan)):
        result = _iam.ashrae(angle, .05)
        assert_allclose(result, want, equal_nan=True)
@needs_numpy_1_10
def test_physical():
    """physical(): ndarray and pandas Series inputs (GitHub issue 397)."""
    angles = np.array([-90., -67.5, -45., -22.5, 0., 22.5, 45., 67.5, 90.,
                       np.nan])
    want = np.array([0, 0.8893998, 0.98797788, 0.99926198, 1, 0.99926198,
                     0.98797788, 0.8893998, 0, np.nan])
    got = _iam.physical(angles, 1.526, 0.002, 4)
    assert_allclose(got, want, equal_nan=True)
    # Series in, Series out -- regression for GitHub issue 397.
    got_series = _iam.physical(pd.Series(angles), 1.526, 0.002, 4)
    assert_series_equal(got_series, pd.Series(want))
@needs_numpy_1_10
def test_physical_scalar():
    """physical() accepts scalar angles and propagates a scalar NaN."""
    for angle, want in ((-45., 0.98797788), (np.nan, np.nan)):
        result = _iam.physical(angle, 1.526, 0.002, 4)
        assert_allclose(result, want, equal_nan=True)
def test_martin_ruiz():
    """martin_ruiz(): defaults, keyword args, and out-of-range/NaN handling."""
    aoi = 45.
    a_r = 0.16
    expected = 0.98986965
    # will fail if default values change
    iam = _iam.martin_ruiz(aoi)
    assert_allclose(iam, expected)
    # will fail if parameter names change
    iam = _iam.martin_ruiz(aoi=aoi, a_r=a_r)
    assert_allclose(iam, expected)
    a_r = 0.18
    aoi = [-100, -60, 0, 60, 100, np.nan, np.inf]
    # angles past +/-90 degrees clamp to 0; NaN propagates, inf maps to 0
    expected = [0.0, 0.9414631, 1.0, 0.9414631, 0.0, np.nan, 0.0]
    # check out of range of inputs as list
    iam = _iam.martin_ruiz(aoi, a_r)
    assert_allclose(iam, expected, equal_nan=True)
    # check out of range of inputs as array
    iam = _iam.martin_ruiz(np.array(aoi), a_r)
    assert_allclose(iam, expected, equal_nan=True)
    # check out of range of inputs as Series
    aoi = pd.Series(aoi)
    expected = pd.Series(expected)
    iam = _iam.martin_ruiz(aoi, a_r)
    assert_series_equal(iam, expected)
def test_martin_ruiz_exception():
    """martin_ruiz() rejects a non-positive a_r with ValueError."""
    with pytest.raises(ValueError):
        _iam.martin_ruiz(0.0, a_r=0.0)
def test_martin_ruiz_diffuse():
    """martin_ruiz_diffuse(): (sky, ground) IAM for scalar/list/array/Series."""
    surface_tilt = 30.
    a_r = 0.16
    # (sky IAM, ground-reflected IAM)
    expected = (0.9549735, 0.7944426)
    # will fail if default values change
    iam = _iam.martin_ruiz_diffuse(surface_tilt)
    assert_allclose(iam, expected)
    # will fail if parameter names change
    iam = _iam.martin_ruiz_diffuse(surface_tilt=surface_tilt, a_r=a_r)
    assert_allclose(iam, expected)
    a_r = 0.18
    surface_tilt = [0, 30, 90, 120, 180, np.nan, np.inf]
    expected_sky = [0.9407678, 0.9452250, 0.9407678, 0.9055541, 0.0000000,
                    np.nan, np.nan]
    expected_gnd = [0.0000000, 0.7610849, 0.9407678, 0.9483508, 0.9407678,
                    np.nan, np.nan]
    # check various inputs as list
    iam = _iam.martin_ruiz_diffuse(surface_tilt, a_r)
    assert_allclose(iam[0], expected_sky, atol=1e-7, equal_nan=True)
    assert_allclose(iam[1], expected_gnd, atol=1e-7, equal_nan=True)
    # check various inputs as array
    iam = _iam.martin_ruiz_diffuse(np.array(surface_tilt), a_r)
    assert_allclose(iam[0], expected_sky, atol=1e-7, equal_nan=True)
    assert_allclose(iam[1], expected_gnd, atol=1e-7, equal_nan=True)
    # check various inputs as Series
    surface_tilt = pd.Series(surface_tilt)
    expected_sky = pd.Series(expected_sky, name='iam_sky')
    expected_gnd = pd.Series(expected_gnd, name='iam_ground')
    iam = _iam.martin_ruiz_diffuse(surface_tilt, a_r)
    assert_series_equal(iam[0], expected_sky)
    assert_series_equal(iam[1], expected_gnd)
@requires_scipy
def test_iam_interp():
    """interp(): linear/cubic interpolation, normalization, extrapolation."""
    aoi_meas = [0.0, 45.0, 65.0, 75.0]
    iam_meas = [1.0, 0.9, 0.8, 0.6]
    # simple default linear method
    aoi = 55.0
    expected = 0.85
    iam = _iam.interp(aoi, aoi_meas, iam_meas)
    assert_allclose(iam, expected)
    # simple non-default method
    aoi = 55.0
    expected = 0.8878062
    iam = _iam.interp(aoi, aoi_meas, iam_meas, method='cubic')
    assert_allclose(iam, expected)
    # check with all reference values
    aoi = aoi_meas
    expected = iam_meas
    iam = _iam.interp(aoi, aoi_meas, iam_meas)
    assert_allclose(iam, expected)
    # check normalization and Series
    aoi = pd.Series(aoi)
    expected = pd.Series(expected)
    iam_mult = np.multiply(0.9, iam_meas)
    iam = _iam.interp(aoi, aoi_meas, iam_mult, normalize=True)
    assert_series_equal(iam, expected)
    # check beyond reference values
    aoi = [-45, 0, 45, 85, 90, 95, 100, 105, 110]
    expected = [0.9, 1.0, 0.9, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0]
    iam = _iam.interp(aoi, aoi_meas, iam_meas)
    assert_allclose(iam, expected)
    # check exception clause: fewer than two reference points
    with pytest.raises(ValueError):
        _iam.interp(0.0, [0], [1])
    # check exception clause: negative reference IAM values
    with pytest.raises(ValueError):
        _iam.interp(0.0, [0, 90], [1, -1])
# Parametrized over scalar, ndarray (including out-of-range and NaN angles),
# and Series inputs; expected values are regression fixtures.
@pytest.mark.parametrize('aoi,expected', [
    (45, 0.9975036250000002),
    (np.array([[-30, 30, 100, np.nan]]),
     np.array([[0, 1.007572, 0, np.nan]])),
    (pd.Series([80]), pd.Series([0.597472]))
])
def test_sapm(sapm_module_params, aoi, expected):
    """sapm() matches fixtures for scalar, array, and Series aoi."""
    out = _iam.sapm(aoi, sapm_module_params)
    if isinstance(aoi, pd.Series):
        assert_series_equal(out, expected, check_less_precise=4)
    else:
        assert_allclose(out, expected, atol=1e-4)
def test_sapm_limits():
    """sapm() honors the zero floor and an explicit upper bound."""
    flat = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
    # constant polynomial passes straight through with no upper bound
    assert _iam.sapm(1, flat) == 5
    flat = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
    # an explicit upper bound clips the value
    assert _iam.sapm(1, flat, upper=1) == 1
    flat = {'B0': -5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
    # negative polynomial results clamp to zero
    assert _iam.sapm(1, flat) == 0
|
anomam/pvlib-python
|
pvlib/tests/test_iam.py
|
Python
|
bsd-3-clause
| 6,661
|
#!/usr/bin/python
import subprocess
import sys
sys.path.append('/usr/local/munki')
from munkilib import FoundationPlist
import os
import platform
def get_status(cmd, checkstring):
    """Run *cmd* and return 'Enabled' if *checkstring* appears in its output.

    stderr is folded into stdout, and a non-zero exit status is treated
    like a normal run (its captured output is still scanned).

    :param cmd: argv-style command list for subprocess
    :param checkstring: literal text that marks the feature as enabled
    :returns: 'Enabled' or 'Disabled'
    """
    status = 'Disabled'
    try:
        # universal_newlines=True decodes output to text so the string scan
        # below works on Python 3 as well (check_output otherwise returns
        # bytes there, and bytes.split('\n') raises TypeError).
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
                                         universal_newlines=True)
    except subprocess.CalledProcessError as e:
        output = str(e.output)
    for line in output.split('\n'):
        if checkstring in line:
            status = 'Enabled'
            break
    return status
def fv_status():
    """Return 'Enabled'/'Disabled' based on `fdesetup status` output."""
    cmd = ['/usr/bin/fdesetup', 'status']
    return get_status(cmd, 'FileVault is On.')
def main():
    """Collect FileVault status and append it to Sal's plugin results plist."""
    if os.path.exists('/usr/bin/fdesetup'):
        filevault = fv_status()
    else:
        # fdesetup is absent on OS versions without FileVault 2 support.
        filevault = 'Not Supported'
    plist_path = '/usr/local/sal/plugin_results.plist'
    # Append to the existing results file if present, otherwise start fresh.
    if os.path.exists(plist_path):
        plist = FoundationPlist.readPlist(plist_path)
    else:
        plist = []
    result = {}
    result['plugin'] = 'Encryption'
    result['historical'] = False
    data = {}
    data['Filevault'] = filevault
    result['data'] = data
    plist.append(result)
    FoundationPlist.writePlist(plist, plist_path)
# Script entry point.
if __name__ == '__main__':
    main()
|
erikng/sal
|
server/plugins/encryption/scripts/encryption.py
|
Python
|
apache-2.0
| 1,192
|
#!/usr/bin/env python
import unittest
import offer
class OfferTestCase(unittest.TestCase):
    """Tests for offer.Offer: formatting, buy heuristics, protocol strings."""
    def setUp(self):
        # Offers varying in id, problem number, price, and kind.
        self.o1 = offer.Offer(501, 101, [3], 1.0, 'all')
        self.o2 = offer.Offer(501, 101, [3], 0.555, 'all')
        self.o3 = offer.Offer(501, 101, [0], 0.1, 'all')
        self.o4 = offer.Offer(501, 101, [247], 1.0, 'all')
        self.o5 = offer.Offer(301, 101, [128], 1.0, 'all')
        self.o6 = offer.Offer(401, 101, [22], 1.0, 'all')
        self.o7 = offer.Offer(501, 101, [22], 0.2, 'secret')
    def tearDown(self):
        del self.o1
        del self.o2
        del self.o3
        del self.o4
        del self.o5
        del self.o6
        del self.o7
    def testStr(self):
        # str() renders every field in the canonical wire-ish format.
        self.assertEqual(str(self.o2),
                         'Offer(id=501, from=101, problem=[3], price=0.555,'
                         ' kind=all)')
    def testRepr(self):
        # repr() is expected to be identical to str().
        self.assertEqual(str(self.o1), repr(self.o1))
    def testIsGoodBuy(self):
        self.assertEqual(False,
                         offer.Offer(-1, -1, [2], 1.0, 'all').IsGoodBuyAll())
    def testIsGoodBuySecret(self):
        # Currently we do the same for secrets as all
        # TODO(lee): Improve!
        self.assertTrue(self.o7.IsGoodBuySecret())
        self.assertTrue(self.o6.IsGoodBuySecret())
        self.assertTrue(self.o5.IsGoodBuySecret())
        self.assertTrue(self.o4.IsGoodBuySecret())
        self.assertTrue(self.o3.IsGoodBuySecret())
        self.assertTrue(self.o2.IsGoodBuySecret())
        self.assertTrue(self.o1.IsGoodBuySecret())
    def testIsGoodBuyAll(self):
        self.assertEqual(True, self.o1.IsGoodBuyAll())
        self.assertEqual(True, self.o2.IsGoodBuyAll())
        self.assertEqual(False, self.o3.IsGoodBuyAll())
        self.assertEqual(True, self.o4.IsGoodBuyAll())
        self.assertEqual(True, self.o5.IsGoodBuyAll())
        self.assertEqual(False, self.o6.IsGoodBuyAll())
        self.assertEqual(True, self.o7.IsGoodBuyAll())
    def testGetAccept(self):
        # GetAccept() returns the accept message and flips the actedon flag.
        self.assertEqual(self.o7.GetAccept(), 'accept[501]')
        self.assertEqual(self.o7.actedon, True)
        self.assertEqual(self.o6.GetAccept(), 'accept[401]')
        self.assertEqual(self.o6.actedon, True)
        self.assertEqual(self.o5.GetAccept(), 'accept[301]')
        self.assertEqual(self.o5.actedon, True)
    def testGetOffer(self):
        self.assertEqual(self.o7.GetOffer(), 'offer[-1 secret ( 22) 0.20000000]')
        self.assertEqual(self.o6.GetOffer(), 'offer[-1 all ( 22) 1.00000000]')
# Script entry point.
if __name__ == '__main__':
    unittest.main()
|
compbrain/Athena-SCG-Bot
|
src/offer_test.py
|
Python
|
bsd-3-clause
| 2,383
|
# -*- coding: utf-8 -*-
from openerp import models, fields
# from openerp.exceptions import UserError
class AccountConfigSettings(models.TransientModel):
    """Extends the accounting settings wizard with AFIP-related options."""
    _inherit = 'account.config.settings'
    # TODO: decide whether or not to implement this
    # _afip_ws_selection = (
    #     lambda self, *args, **kwargs: self.env[
    #         'account.journal']._get_afip_ws_selection(*args, **kwargs))
    # afip_ws = fields.Selection(
    #     _afip_ws_selection,
    #     'AFIP WS',
    # )
    # @api.multi
    # def set_chart_of_accounts(self):
    #     """
    #     We send this value in context because to use them on journal creation
    #     """
    #     return super(AccountConfigSettings, self.with_context(
    #         afip_ws=self.afip_ws,
    #     )).set_chart_of_accounts()
    # Related field: mirrors the company-level setting so it can be edited
    # from this wizard.
    afip_auth_verify_type = fields.Selection(
        related='company_id.afip_auth_verify_type'
    )
|
jobiols/odoo-argentina
|
l10n_ar_afipws_fe/res_config.py
|
Python
|
agpl-3.0
| 899
|
#!/usr/bin/env python
from LingwoNLP.document import parseString
from LingwoNLP.remote import ServerInterface
# Quick helper class to prevent us from connecting when its not necessary
class Connector(object):
    """Lazy holder for a logged-in ServerInterface.

    Connecting is deferred until get() is first called, so constructing a
    Connector never touches the network.
    """
    def __init__(self):
        # Created on demand by get(); None means "not connected yet".
        self.remote = None
    def get(self):
        """Return the shared ServerInterface, connecting on first use.

        The interface is cached only after login() succeeds; previously a
        failed login left a broken, never-logged-in interface cached
        forever, so every later call reused it without retrying.
        """
        if self.remote is None:
            remote = ServerInterface()
            remote.login()
            self.remote = remote
        return self.remote
def lookup(remote, doc, lang):
    """Annotate each unprocessed token in *doc* with dictionary data.

    For every token lacking a 'pos' attribute, searches the remote
    lingwo_entry index (exact word, then lowercased) and, on a hit, sets
    the 'headword', 'pos', and optionally 'form' attributes on the token's
    DOM node.
    """
    # TODO: this isn't what we want to do in the end, but it will work now for testing
    def qlookup(word):
        # Return the first matching entry for *word* in *lang*, or None.
        res = remote.call('lingwo_entry.search', word, {'language':lang})
        if len(res) > 0:
            return res[0]
        return None
    # make sure remote gets created
    for stream in doc.sents:
        for token in stream.tokens:
            # Skip tokens already marked with 'pos' -- currently the only
            # indicator that they have been processed (we should be able
            # to do better!).
            if token.dom.hasAttribute('pos'):
                continue
            # mark all tokens that are processed here as 'auto'
            # TODO: should we have a better heuristic?
            token.dom.setAttribute('auto', 'true');
            word = unicode(token)
            # try first the word, then a lower case version of the word
            res = qlookup(word)
            if res is None:
                res = qlookup(word.lower())
            if res is not None:
                # Only record the headword when it differs from the surface form.
                if word != res['headword']:
                    token.dom.setAttribute('headword', res['headword'])
                token.dom.setAttribute('pos', res['pos'])
                if res.has_key('form_name') and res['form_name'] != '_noname_':
                    token.dom.setAttribute('form', res['form_name'])
def main():
    """Command-line entry point: segment and/or look up a content node.

    Usage: annotate.py [-n] [-m mode] [-l lang] <nid|->
      -m  comma-separated modes: all (default), segment, lookup
      -n  dry run: print the annotated body instead of saving it
      -l  language code (defaults to the node's own language)
    A nid of '-' reads the document body from stdin.
    """
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], 'nm:l:', [])
    do_segment = False
    do_lookup = False
    do_dryRun = False
    mode = "all"
    lang = None
    for o, a in opts:
        if o == '-m':
            mode = a
        elif o == '-n':
            do_dryRun = True
        elif o == '-l':
            lang = a
    if len(args) != 1:
        print >> sys.stderr, "Should take the nid of the text on the command line!"
        sys.exit(1)
    nid = args[0]
    # Expand the mode string into the individual operation flags.
    parts = mode.split(',')
    for p in parts:
        if p == 'all':
            do_segment = True
            do_lookup = True
        elif p == 'segment':
            do_segment = True
        elif p == 'lookup':
            do_lookup = True
        else:
            print >> sys.stderr, "Invalid mode: "+p
            sys.exit(1)
    conn = Connector()
    # '-' means: read the body from stdin rather than from the server.
    if nid == '-':
        content_item = { 'body': sys.stdin.read() }
    else:
        content_item = conn.get().call('node.get', int(nid))
        if content_item['type'] != 'content':
            print >> sys.stderr, "Node must be a content node!"
            sys.exit(1)
    if lang is None:
        lang = content_item['language']
        if lang is None:
            print >> sys.stderr, "Must pass -l language on the command line!"
            sys.exit(1)
    doc = parseString(content_item['body'], lang)
    if do_segment:
        doc.segmentize()
    if do_lookup:
        lookup(conn.get(), doc, lang)
    content_item['body'] = str(doc)
    # Dry runs (and stdin input) print instead of saving back to the server.
    if nid == '-' or do_dryRun:
        print content_item['body']
    else:
        conn.get().call('node.save', content_item)
# Script entry point.
if __name__ == '__main__': main()
|
dsnopek/lingwo-old
|
nlp/annotate.py
|
Python
|
gpl-2.0
| 3,493
|
from test import *
class RoutingTest(BerryTest):
    """Routing tests: GET/POST dispatch, URL parameters, case handling."""
    def test_route_get(self):
        self.getPage('/')
        self.assertBody("index")
    def test_route_get_with_url_params(self):
        # 'world' is captured from the URL and interpolated into the body.
        self.getPage('/hello/world')
        self.assertBody("Hello, world!")
    def test_route_post(self):
        self.getPage('/post', method='POST')
        self.assertBody("post")
    def test_route_post_with_url_params(self):
        self.getPage('/hello/world', method='POST')
        self.assertBody("Hello, world!")
    def test_routes_are_case_insensitive(self):
        # Only the status is asserted; body casing is not specified.
        self.getPage('/HELLO/world')
        self.assertStatus(200)
|
adeel/berry
|
tests/routing_test.py
|
Python
|
mit
| 595
|
from sys import path
import modules
import modules.common
from os.path import abspath, dirname
import threading
import time
path.append(dirname(abspath(__file__)))
def get_version():
    """Return the framework's version string."""
    version = "0.1.0-experimental"
    return version
def get_loaded_modules():
    """Return the metadata dict of every registered module.

    Iterates the global registry (modules.common.Modules.list) in
    registration order.
    """
    # Comprehension replaces the manual append loop with the single-letter
    # accumulator name.
    return [m.get_metadata() for m in modules.common.Modules.list]
class BruteForceCampaign(object):
    """Coordinates a multi-threaded credential brute-force run.

    Worker threads run thread_engine(), pulling (username, password)
    items off job_queue until one succeeds or the queue is drained.
    All shared state is guarded by queue_rlock.
    """
    def __init__(self, module):
        # Module providing try_credentials(config) -> bool.
        self.module = module
        # Base config dict; username/password are injected per attempt.
        self.config = None
        self.queue_rlock = threading.RLock()
        # Number of attempts made so far (across all threads).
        self.checked = 0
        # Pending (username, password) tuples.
        self.job_queue = []
        # Set when a hit is found or a worker terminates.
        self.done = False
    def set_configuration(self, config):
        """Store the base configuration passed to try_credentials()."""
        self.config = config
    def increment_check_count(self):
        """Thread-safely bump the attempt counter."""
        with self.queue_rlock:
            self.checked += 1
    def thread_engine(self):
        """Worker loop: pop queued credentials and try them until done."""
        try:
            while True:
                item = None
                with self.queue_rlock:
                    if self.done:
                        break
                    if len(self.job_queue) > 0:
                        item = self.job_queue[0]
                        del self.job_queue[0]
                if item is None:
                    # Queue momentarily empty: back off briefly and retry.
                    time.sleep(0.1)
                    continue
                # NOTE(review): this mutates the shared config dict without
                # holding the lock; concurrent workers may interleave the
                # username/password writes.
                config = self.config
                config['username'] = item[0]
                config['password'] = item[1]
                if self.module.try_credentials(config):
                    print "COMPLETE: %s - %s" % (item[0], item[1])
                    with self.queue_rlock:
                        self.done = True
                self.increment_check_count()
        except Exception:
            # NOTE(review): swallows all details of the failure; consider
            # logging the traceback instead.
            print "exception"
        finally:
            # One worker finishing (or failing) stops the whole campaign.
            with self.queue_rlock:
                self.done = True
                self.job_queue = []
|
bwall/BAMF
|
bamfbrute/__init__.py
|
Python
|
mit
| 1,759
|
"""Support for Ebusd daemon for communication with eBUS heating systems."""
import logging
import socket
import ebusdpy
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PORT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from .const import DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ebusd"
DEFAULT_PORT = 8888
CONF_CIRCUIT = "circuit"
CACHE_TTL = 900
SERVICE_EBUSD_WRITE = "ebusd_write"
def verify_ebusd_config(config):
    """Validate that every monitored condition exists for the circuit.

    Raises vol.Invalid naming the first unknown condition; otherwise
    returns the config unchanged so it can be chained inside the schema.
    """
    circuit = config[CONF_CIRCUIT]
    unknown = next(
        (
            cond
            for cond in config[CONF_MONITORED_CONDITIONS]
            if cond not in SENSOR_TYPES[circuit]
        ),
        None,
    )
    if unknown is not None:
        raise vol.Invalid(f"Condition '{unknown}' not in '{circuit}'.")
    return config
# YAML configuration schema: one required circuit/host plus optional port,
# name, and monitored conditions.  Cross-field validation (conditions must
# belong to the circuit) is delegated to verify_ebusd_config.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            vol.All(
                {
                    vol.Required(CONF_CIRCUIT): cv.string,
                    vol.Required(CONF_HOST): cv.string,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                    vol.Optional(CONF_MONITORED_CONDITIONS, default=[]): cv.ensure_list,
                },
                verify_ebusd_config,
            )
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the eBusd component.

    Probes the daemon, stores an EbusdData accessor in hass.data, loads the
    sensor platform, and registers the ebusd_write service.  Returns False
    when the daemon is unreachable.
    """
    _LOGGER.debug("Integration setup started")
    conf = config[DOMAIN]
    name = conf[CONF_NAME]
    circuit = conf[CONF_CIRCUIT]
    monitored_conditions = conf.get(CONF_MONITORED_CONDITIONS)
    server_address = (conf.get(CONF_HOST), conf.get(CONF_PORT))
    # NOTE(review): the try block covers more than the network probe; an
    # OSError raised during platform/service setup would also be swallowed.
    try:
        ebusdpy.init(server_address)
        hass.data[DOMAIN] = EbusdData(server_address, circuit)
        sensor_config = {
            CONF_MONITORED_CONDITIONS: monitored_conditions,
            "client_name": name,
            "sensor_types": SENSOR_TYPES[circuit],
        }
        load_platform(hass, "sensor", DOMAIN, sensor_config, config)
        hass.services.register(DOMAIN, SERVICE_EBUSD_WRITE, hass.data[DOMAIN].write)
        _LOGGER.debug("Ebusd integration setup completed")
        return True
    except (socket.timeout, OSError):
        # Unreachable daemon: report the setup as failed.
        return False
class EbusdData:
    """Get the latest data from Ebusd."""
    def __init__(self, address, circuit):
        """Initialize the data object."""
        self._circuit = circuit
        self._address = address
        # Maps sensor name -> last successfully read value.
        self.value = {}
    def update(self, name, stype):
        """Call the Ebusd API to update the data.

        Successful reads are cached in self.value; an "ERR:" payload is
        logged and the previous value kept.
        """
        try:
            _LOGGER.debug("Opening socket to ebusd %s", name)
            command_result = ebusdpy.read(
                self._address, self._circuit, name, stype, CACHE_TTL
            )
            if command_result is not None:
                if "ERR:" in command_result:
                    _LOGGER.warning(command_result)
                else:
                    self.value[name] = command_result
        except RuntimeError as err:
            # Log locally, then re-raise so the sensor shows unavailable.
            _LOGGER.error(err)
            raise RuntimeError(err) from err
    def write(self, call):
        """Service handler: write a value to ebusd."""
        name = call.data.get("name")
        value = call.data.get("value")
        try:
            _LOGGER.debug("Opening socket to ebusd %s", name)
            command_result = ebusdpy.write(self._address, self._circuit, name, value)
            # ebusd acknowledges successful writes with "done".
            if command_result is not None and "done" not in command_result:
                _LOGGER.warning("Write command failed: %s", name)
        except RuntimeError as err:
            _LOGGER.error(err)
|
jawilson/home-assistant
|
homeassistant/components/ebusd/__init__.py
|
Python
|
apache-2.0
| 3,733
|
# -*- coding: utf-8 -*-
"""
Common structures and functions used by other scripts.
"""
from xml.etree import cElementTree as ET
# Mapping from the textual entailment labels used in the XML files to the
# integer class ids used elsewhere in the pipeline.
str_to_entailment = {'none': 0,
                     'entailment': 1,
                     'paraphrase': 2}
# Inverse mapping: class id -> textual label.
entailment_to_str = {v: k for k, v in str_to_entailment.items()}
class Pair(object):
    '''
    Class representing a pair of texts from SICK or RTE.
    It is meant to be used as an abstract representation for both.
    '''
    def __init__(self, t, h, id_, entailment, similarity):
        '''
        :param t: string with the text
        :param h: string with the hypothesis
        :param id_: int indicating id in the original file
        :param entailment: int indicating entailment class (may be None)
        :param similarity: float (may be None)
        '''
        self.t = t
        self.h = h
        self.id = id_
        self.entailment = entailment
        self.similarity = similarity
    def __repr__(self):
        '''Debug-friendly representation showing the id and labels.'''
        return ('Pair(id_=%r, entailment=%r, similarity=%r)'
                % (self.id, self.entailment, self.similarity))
def read_xml(filename, need_labels):
    '''
    Read an RTE XML file and return a list of Pair objects.

    :param filename: name of the file to read
    :param need_labels: boolean indicating if labels should be present;
        when True, a pair missing both entailment and similarity raises
        ValueError
    '''
    pairs = []
    tree = ET.parse(filename)
    root = tree.getroot()
    for xml_pair in root.iter('pair'):
        t = xml_pair.find('t').text
        h = xml_pair.find('h').text
        attribs = dict(xml_pair.items())
        id_ = int(attribs['id'])
        if 'entailment' in attribs:
            ent_string = attribs['entailment'].lower()
            try:
                ent_value = str_to_entailment[ent_string]
            # A dict lookup with an unknown key raises KeyError; the old
            # "except ValueError" never matched, so the friendly message
            # below was unreachable.
            except KeyError:
                msg = 'Unexpected value for attribute "entailment" at pair {}: {}'
                raise ValueError(msg.format(id_, ent_string))
        else:
            ent_value = None
        if 'similarity' in attribs:
            similarity = float(attribs['similarity'])
        else:
            similarity = None
        if need_labels and similarity is None and ent_value is None:
            msg = 'Missing both entailment and similarity values for pair {}'.format(id_)
            raise ValueError(msg)
        pair = Pair(t, h, id_, ent_value, similarity)
        pairs.append(pair)
    return pairs
|
nathanshartmann/portuguese_word_embeddings
|
sentence_similarity/utils/commons.py
|
Python
|
gpl-3.0
| 2,224
|
import unittest
import os
import cv2
import scanner
from scanner import getVotesFromImage
class TestSlamVotes(unittest.TestCase):
    """Image-driven tests: expected digits are encoded in each filename
    after a double underscore ("card__859724459716.jpg")."""
    def setUp(self):
        # Show custom assertion messages in addition to the default ones.
        self.longMessage = True
        pass
    def check_all_images(self, directory):
        """Run checkImage on every encoded .jpg file in *directory*."""
        for filename in os.listdir(directory):
            if filename.endswith(".jpg") and "__" in filename:
                self.checkImage(directory, filename)
    def checkImage(self, directory, filename):
        """Scan one image and compare against the digits in its name."""
        print("Testing image {} ".format(filename))
        name = os.path.splitext(filename)[0]
        # The expected digit string follows the double underscore.
        expectedStr = name.split("__")[1]
        expectedList = [int(x) for x in expectedStr]
        fullFilename = os.path.join(directory, filename)
        scanner.setCameraDebugValues()
        actualResult = scanner.getVotesFromImage(fullFilename)
        if(actualResult != expectedList):
            # Keep any debug windows open briefly before failing.
            cv2.waitKey(10001)
        self.assertEqual(actualResult, expectedList, msg="\nFailure for test image: {}".format(fullFilename))
        print("Success for" + filename)
    def x_test_card_1(self):
        # Prefixed with x_ so unittest does not collect it by default.
        l = getVotesFromImage("../testimages/card__859724459716.jpg")
        expected = [8, 5, 9, 7, 2, 4, 4, 5, 9, 7, 1, 6]
        self.assertEqual(l, expected)
    def test_debug_cards_testimages(self):
        # Debug hook: uncomment a line to step through a single image.
        #self.checkImage("../testimages/", "card_webcam_box2__896676796899.jpg")
        #self.checkImage("../testimages/", "card_webcam_box__785783566369.jpg")
        #self.checkImage("../testimages/", "card_blocks__265659759735.jpg")
        #self.checkImage("../testimages/", "card__859724459716.jpg")
        #self.checkImage("../testimages/", "card_webcam_rotated_bad_background_card__898656717575.jpg")
        #self.checkImage("../testimages/", "card_webcam_box3__741798877941.jpg")
        cv2.waitKey(10001)
        pass
    def Xtest_cards_testimages(self):
        # Disabled (X prefix): runs the full image directory.
        self.check_all_images("../testimages/")
def test_generator(directory, filename):
    """Return a test method that checks one image file.

    The closure captures *directory* and *filename* so the returned
    function can be attached to TestSlamVotes via setattr.
    """
    def test(self):
        self.checkImage(directory,filename)
    return test
# Dynamically attach one test method per encoded image so each image is
# reported as its own test case, then run the suite.
if __name__ == '__main__':
    directory = "../testimages/"
    for filename in os.listdir(directory):
        if filename.endswith(".jpg") and "__" in filename:
            test_name = 'test_%s' % filename
            test = test_generator(directory, filename)
            setattr(TestSlamVotes , test_name, test)
    unittest.main()
|
david-schuler/slam_votes
|
slam_votes/test_slam_votes.py
|
Python
|
gpl-3.0
| 2,364
|
import base64
import binascii
import io
import tempfile
import flask
import google.cloud.storage as gcloud_storage
import google.cloud.exceptions as gcloud_exceptions
from werkzeug.contrib.cache import FileSystemCache
from .. import config, model, util
from .blueprint import coordinator_api
# Cache the worker blob to avoid repeated requests to object storage.
# Entries expire after 5 minutes; the backing temp directory lives for
# the lifetime of the process.
cache_dir = tempfile.TemporaryDirectory()
cache = FileSystemCache(cache_dir.name, default_timeout=60*5)
@coordinator_api.route("/download/worker", methods=["GET"])
def download_source_blob():
    """Retrieve the worker blob from object storage.

    Serves from the local FileSystemCache when possible; on a miss the
    artifact is fetched from GCS and memoized.  Responds 404 when the
    artifact does not exist.
    """
    cached_blob = cache.get(config.WORKER_ARTIFACT_KEY)
    if cached_blob is None:
        # NOTE(review): print() looks like leftover debug logging.
        print("Getting from GCloud", config.WORKER_ARTIFACT_KEY)
        # Retrieve from GCloud
        try:
            gcloud_blob = gcloud_storage.Blob(
                config.WORKER_ARTIFACT_KEY,
                model.get_deployed_artifacts_bucket(),
                chunk_size=262144)
            cached_blob = gcloud_blob.download_as_string()
            cache.set(config.WORKER_ARTIFACT_KEY, cached_blob)
        except gcloud_exceptions.NotFound:
            raise util.APIError(404, message="Worker blob not found.")
    if cached_blob is None:
        raise util.APIError(404, message="Worker blob not found.")
    print("Building buffer")
    # Serve the bytes from memory as a gzip download.
    buffer = io.BytesIO()
    buffer.write(cached_blob)
    buffer.seek(0)
    return flask.send_file(buffer, mimetype="application/gzip",
                           as_attachment=True,
                           attachment_filename="Halite.tgz")
@coordinator_api.route("/botFile", methods=["POST"])
def upload_bot():
    """Save a compiled bot to object storage.

    Expects ``user_id`` and ``bot_id`` form fields plus a ``bot.zip``
    file part; responds 400 when any of them is missing.
    """
    user_id = flask.request.form.get("user_id", None)
    bot_id = flask.request.form.get("bot_id", None)
    # Validate ids like hash_bot() does; previously a missing id silently
    # produced a blob literally named "None_None".
    if not user_id or not bot_id:
        raise util.APIError(400, message="Please provide user and bot ID.")
    if "bot.zip" not in flask.request.files:
        raise util.APIError(400, message="Please provide the bot file.")
    uploaded_file = flask.request.files["bot.zip"]
    # Save to GCloud
    blob = gcloud_storage.Blob("{}_{}".format(user_id, bot_id),
                               model.get_bot_bucket(),
                               chunk_size=262144)
    blob.upload_from_file(uploaded_file)
    return util.response_success()
@coordinator_api.route("/botFile", methods=["GET"])
def download_bot():
    """Retrieve a compiled or uncompiled bot from object storage.

    Query params: user_id, bot_id, and optional compile (selects the
    compilation bucket).  Responds 404 when the blob is missing.
    """
    user_id = flask.request.values.get("user_id", None)
    bot_id = flask.request.values.get("bot_id", None)
    # NOTE(review): "compile" shadows the builtin and is truthy for any
    # non-empty query value -- including the string "false".
    compile = flask.request.values.get("compile", False)
    if compile:
        bucket = model.get_compilation_bucket()
    else:
        bucket = model.get_bot_bucket()
    # Retrieve from GCloud
    try:
        botname = "{}_{}".format(user_id, bot_id)
        blob = gcloud_storage.Blob(botname,
                                   bucket, chunk_size=262144)
        buffer = io.BytesIO()
        blob.download_to_file(buffer)
        buffer.seek(0)
        return flask.send_file(buffer, mimetype="application/zip",
                               as_attachment=True,
                               attachment_filename=botname + ".zip")
    except gcloud_exceptions.NotFound:
        raise util.APIError(404, message="Bot not found.")
@coordinator_api.route("/botHash")
def hash_bot():
    """Get the MD5 hash of a compiled bot.

    Responds 400 when ids are missing or the blob does not exist.
    """
    user_id = flask.request.args.get("user_id", None)
    bot_id = flask.request.args.get("bot_id", None)
    compile = flask.request.args.get("compile", False)
    if not user_id or not bot_id:
        raise util.APIError(400, message="Please provide user and bot ID.")
    if compile:
        bucket = model.get_compilation_bucket()
    else:
        bucket = model.get_bot_bucket()
    blob = bucket.get_blob("{}_{}".format(user_id, bot_id))
    if blob is None:
        raise util.APIError(400, message="Bot does not exist.")
    # GCS reports MD5 base64-encoded; convert it to the usual hex digest.
    return util.response_success({
        "hash": binascii.hexlify(base64.b64decode(blob.md5_hash)).decode('utf-8'),
    })
|
lanyudhy/Halite-II
|
apiserver/apiserver/coordinator/storage.py
|
Python
|
mit
| 4,017
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as a attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from test import test_support as support
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
# Make classes below that are defined without an explicit base new-style
# under Python 2 (harmless no-op on Python 3).
__metaclass__ = type
# Rebind the module-level name ``bytes`` to a Python-3-like bytes type so the
# tests index/iterate bytes consistently across both major versions.
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read() which calls readinto()."""

    def __init__(self, read_stack=()):
        # Queue of chunks handed out by readinto(); a None entry simulates a
        # non-blocking "would block" condition.
        self._read_stack = list(read_stack)
        self._write_stack = []
        self._reads = 0
        self._extraneous_reads = 0

    def write(self, b):
        self._write_stack.append(bytes(b))
        return len(b)

    def writable(self):
        return True

    def fileno(self):
        # Arbitrary fake file descriptor.
        return 42

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence):
        return 0   # wrong but we gotta return something

    def tell(self):
        return 0   # same comment as above

    def readinto(self, buf):
        self._reads += 1
        try:
            chunk = self._read_stack[0]
        except IndexError:
            # Queue exhausted: the caller issued a read it did not need.
            self._extraneous_reads += 1
            return 0
        if chunk is None:
            # Simulate EWOULDBLOCK on a non-blocking stream.
            del self._read_stack[0]
            return None
        room = len(buf)
        size = len(chunk)
        if size <= room:
            # Whole chunk fits: consume it.
            del self._read_stack[0]
            buf[:size] = chunk
            return size
        # Chunk is larger than the buffer: hand out a prefix, keep the rest.
        buf[:] = chunk[:room]
        self._read_stack[0] = chunk[room:]
        return room

    def truncate(self, pos=None):
        return pos
# C flavor: mixes the mock into the C-implemented io.RawIOBase.
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    pass
# Pure-Python flavor: mixes the mock into _pyio.RawIOBase.
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    pass
class MockRawIO(MockRawIOWithoutRead):
    """Mock raw IO that also implements read() by popping queued chunks."""

    def read(self, n=None):
        """Return the next queued chunk (the size hint *n* is ignored), or
        b"" once the queue is exhausted (counting the extraneous read)."""
        self._reads += 1
        try:
            return self._read_stack.pop(0)
        except IndexError:
            # Was a bare ``except:``, which would also have swallowed
            # KeyboardInterrupt/SystemExit; only an empty queue is expected.
            self._extraneous_reads += 1
            return b""
# C flavor of MockRawIO.
class CMockRawIO(MockRawIO, io.RawIOBase):
    pass
# Pure-Python flavor of MockRawIO.
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    pass
class MisbehavedRawIO(MockRawIO):
    """Raw IO whose return values violate the RawIOBase contract.

    Used by the buffered-layer tests to check that bogus results from the
    underlying raw stream are detected and raise IOError.
    """
    def write(self, b):
        # Claims to have written twice as many bytes as were passed in.
        return MockRawIO.write(self, b) * 2
    def read(self, n=None):
        # Returns twice as much data as the queue would normally yield.
        return MockRawIO.read(self, n) * 2
    def seek(self, pos, whence):
        # Negative stream positions are invalid.
        return -123
    def tell(self):
        return -456
    def readinto(self, buf):
        # Claims to have filled five times the buffer's length.
        MockRawIO.readinto(self, buf)
        return len(buf) * 5
# C flavor of MisbehavedRawIO.
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    pass
# Pure-Python flavor of MisbehavedRawIO.
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    pass
class CloseFailureIO(MockRawIO):
    """Raw IO whose first close() raises IOError; later closes succeed."""
    # Class-level default; flipped to 1 (per instance) on the first close().
    closed = 0
    def close(self):
        if not self.closed:
            self.closed = 1
            raise IOError
# C flavor of CloseFailureIO.
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    pass
# Pure-Python flavor of CloseFailureIO.
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    pass
class MockFileIO:
    """BytesIO mixin that records the size of every read()/readinto() result.

    Mixed with io.BytesIO / pyio.BytesIO below; relies on cooperative
    super() calls reaching the BytesIO implementation.
    """
    def __init__(self, data):
        # One entry per read call: byte count, or None for a None result.
        self.read_history = []
        super(MockFileIO, self).__init__(data)
    def read(self, n=None):
        res = super(MockFileIO, self).read(n)
        self.read_history.append(None if res is None else len(res))
        return res
    def readinto(self, b):
        res = super(MockFileIO, self).readinto(b)
        self.read_history.append(res)
        return res
# C flavor of MockFileIO.
class CMockFileIO(MockFileIO, io.BytesIO):
    pass
# Pure-Python flavor of MockFileIO.
class PyMockFileIO(MockFileIO, pyio.BytesIO):
    pass
class MockNonBlockWriterIO:
    """Writable raw IO mock that can simulate a non-blocking partial write.

    After block_on(char), the next write() whose data contains that char
    accepts only the bytes before it and raises BlockingIOError.  The
    exception class comes from the C/Py subclasses below.
    """

    def __init__(self):
        self._write_stack = []
        self._blocker_char = None

    def pop_written(self):
        """Return everything written so far and clear the record."""
        written = b"".join(self._write_stack)
        del self._write_stack[:]
        return written

    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char

    def readable(self):
        return True

    def seekable(self):
        return True

    def writable(self):
        return True

    def write(self, b):
        data = bytes(b)
        cut = -1
        if self._blocker_char:
            try:
                cut = data.index(self._blocker_char)
            except ValueError:
                # Blocker char absent from this chunk: write it all.
                pass
            else:
                # Accept only the bytes before the blocker, consume the
                # blocker (it only fires once), then pretend to block.
                self._blocker_char = None
                self._write_stack.append(data[:cut])
                raise self.BlockingIOError(0, "test blocking", cut)
        self._write_stack.append(data)
        return len(data)
# C flavor: the BlockingIOError raised by write() is the C io module's.
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    BlockingIOError = io.BlockingIOError
# Pure-Python flavor: the BlockingIOError raised by write() is _pyio's.
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
    """End-to-end tests of raw, buffered and text file objects.

    Subclasses (CIOTest/PyIOTest) bind ``self.open``, ``self.FileIO``,
    ``self.BytesIO`` etc. to either the C (``io``) or pure-Python
    (``_pyio``) implementation, so every test runs against both.
    """
    def setUp(self):
        support.unlink(support.TESTFN)
    def tearDown(self):
        support.unlink(support.TESTFN)
    def write_ops(self, f):
        # Exercise write/seek/tell/truncate on an already-open writable f.
        self.assertEqual(f.write(b"blah."), 5)
        f.truncate(0)
        # truncate() must not move the file position.
        self.assertEqual(f.tell(), 5)
        f.seek(0)
        self.assertEqual(f.write(b"blah."), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"Hello."), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(-1, 1), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"h"), 1)
        self.assertEqual(f.seek(-1, 2), 13)
        self.assertEqual(f.tell(), 13)
        self.assertEqual(f.truncate(12), 12)
        self.assertEqual(f.tell(), 13)
        # Float seek positions must be rejected.
        self.assertRaises(TypeError, f.seek, 0.0)
    def read_ops(self, f, buffered=False):
        # Exercise read/readinto/seek on a stream containing b"hello world\n".
        data = f.read(5)
        self.assertEqual(data, b"hello")
        data = bytearray(data)
        self.assertEqual(f.readinto(data), 5)
        self.assertEqual(data, b" worl")
        # Short read at EOF only fills part of the buffer; the rest keeps
        # its previous contents.
        self.assertEqual(f.readinto(data), 2)
        self.assertEqual(len(data), 5)
        self.assertEqual(data[:2], b"d\n")
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.read(20), b"hello world\n")
        self.assertEqual(f.read(1), b"")
        self.assertEqual(f.readinto(bytearray(b"x")), 0)
        self.assertEqual(f.seek(-6, 2), 6)
        self.assertEqual(f.read(5), b"world")
        self.assertEqual(f.read(0), b"")
        self.assertEqual(f.readinto(bytearray()), 0)
        self.assertEqual(f.seek(-6, 1), 5)
        self.assertEqual(f.read(5), b" worl")
        self.assertEqual(f.tell(), 10)
        self.assertRaises(TypeError, f.seek, 0.0)
        if buffered:
            # Unbounded read() is only available on buffered streams here.
            f.seek(0)
            self.assertEqual(f.read(), b"hello world\n")
            f.seek(6)
            self.assertEqual(f.read(), b"world\n")
            self.assertEqual(f.read(), b"")
    # Offset just past the 32-bit boundary, used by the large-file tests.
    LARGE = 2**31
    def large_file_ops(self, f):
        # Seek/write/truncate around the 2 GiB boundary.
        assert f.readable()
        assert f.writable()
        self.assertEqual(f.seek(self.LARGE), self.LARGE)
        self.assertEqual(f.tell(), self.LARGE)
        self.assertEqual(f.write(b"xxx"), 3)
        self.assertEqual(f.tell(), self.LARGE + 3)
        self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
        self.assertEqual(f.truncate(), self.LARGE + 2)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 2)
        self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
        # Explicit truncate(pos) leaves the file position untouched.
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 1)
        self.assertEqual(f.seek(-1, 2), self.LARGE)
        self.assertEqual(f.read(2), b"x")
    def test_invalid_operations(self):
        # Try writing on a file opened in read mode and vice-versa.
        for mode in ("w", "wb"):
            with self.open(support.TESTFN, mode) as fp:
                self.assertRaises(IOError, fp.read)
                self.assertRaises(IOError, fp.readline)
        with self.open(support.TESTFN, "rb") as fp:
            self.assertRaises(IOError, fp.write, b"blah")
            self.assertRaises(IOError, fp.writelines, [b"blah\n"])
        with self.open(support.TESTFN, "r") as fp:
            self.assertRaises(IOError, fp.write, "blah")
            self.assertRaises(IOError, fp.writelines, ["blah\n"])
    def test_raw_file_io(self):
        # buffering=0 yields an unbuffered (raw) file object.
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f)
    def test_buffered_file_io(self):
        # Same exercise through the default buffered layer.
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f, True)
    def test_readline(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readline(), b"abc\n")
            self.assertEqual(f.readline(10), b"def\n")
            # A size limit may split a line in the middle.
            self.assertEqual(f.readline(2), b"xy")
            self.assertEqual(f.readline(4), b"zzy\n")
            # Embedded NUL bytes must not terminate a line.
            self.assertEqual(f.readline(), b"foo\x00bar\n")
            self.assertEqual(f.readline(None), b"another line")
            self.assertRaises(TypeError, f.readline, 5.3)
        with self.open(support.TESTFN, "r") as f:
            self.assertRaises(TypeError, f.readline, 5.3)
    def test_raw_bytes_io(self):
        # Run the shared write/read exercises against in-memory BytesIO.
        f = self.BytesIO()
        self.write_ops(f)
        data = f.getvalue()
        self.assertEqual(data, b"hello world\n")
        f = self.BytesIO(data)
        self.read_ops(f, True)
    def test_large_file_ops(self):
        # On Windows and Mac OSX this test comsumes large resources; It takes
        # a long time to build the >2GB file and takes >2GB of disk space
        # therefore the resource must be enabled to run this test.
        if sys.platform[:3] == 'win' or sys.platform == 'darwin':
            if not support.is_resource_enabled("largefile"):
                print("\nTesting large file ops skipped on %s." % sys.platform,
                      file=sys.stderr)
                print("It requires %d bytes and a long time." % self.LARGE,
                      file=sys.stderr)
                print("Use 'regrtest.py -u largefile test_io' to run it.",
                      file=sys.stderr)
                return
        with self.open(support.TESTFN, "w+b", 0) as f:
            self.large_file_ops(f)
        with self.open(support.TESTFN, "w+b") as f:
            self.large_file_ops(f)
    def test_with_open(self):
        # The with statement must close the file on both the normal and the
        # exceptional exit path, at every buffer size.
        for bufsize in (0, 1, 100):
            f = None
            with self.open(support.TESTFN, "wb", bufsize) as f:
                f.write(b"xxx")
            self.assertEqual(f.closed, True)
            f = None
            try:
                with self.open(support.TESTFN, "wb", bufsize) as f:
                    1 // 0
            except ZeroDivisionError:
                self.assertEqual(f.closed, True)
            else:
                self.fail("1 // 0 didn't raise an exception")
    # issue 5008: tell() must report the true end-of-file position when a
    # file is opened in append mode.
    def test_append_mode_tell(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "ab", buffering=0) as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "ab") as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "a") as f:
            self.assertTrue(f.tell() > 0)
    def test_destructor(self):
        # Destroying a FileIO must call close() (which flushes) exactly once,
        # in the order __del__ -> close -> flush as recorded below.
        record = []
        class MyFileIO(self.FileIO):
            def __del__(self):
                record.append(1)
                try:
                    f = super(MyFileIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super(MyFileIO, self).close()
            def flush(self):
                record.append(3)
                super(MyFileIO, self).flush()
        f = MyFileIO(support.TESTFN, "wb")
        f.write(b"xxx")
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")
    def _check_base_destructor(self, base):
        record = []
        class MyIO(base):
            def __init__(self):
                # This exercises the availability of attributes on object
                # destruction.
                # (in the C version, close() is called by the tp_dealloc
                # function, not by __del__)
                self.on_del = 1
                self.on_close = 2
                self.on_flush = 3
            def __del__(self):
                record.append(self.on_del)
                try:
                    f = super(MyIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(self.on_close)
                super(MyIO, self).close()
            def flush(self):
                record.append(self.on_flush)
                super(MyIO, self).flush()
        f = MyIO()
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_IOBase_destructor(self):
        self._check_base_destructor(self.IOBase)
    def test_RawIOBase_destructor(self):
        self._check_base_destructor(self.RawIOBase)
    def test_BufferedIOBase_destructor(self):
        self._check_base_destructor(self.BufferedIOBase)
    def test_TextIOBase_destructor(self):
        self._check_base_destructor(self.TextIOBase)
    def test_close_flushes(self):
        # Closing (via the with block) must flush buffered data to disk.
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")
    def test_array_writes(self):
        # write() must accept any buffer-API object and report a byte count.
        a = array.array(b'i', range(10))
        n = len(a.tostring())
        with self.open(support.TESTFN, "wb", 0) as f:
            self.assertEqual(f.write(a), n)
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.write(a), n)
    def test_closefd(self):
        self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
                          closefd=False)
    def test_read_closed(self):
        with self.open(support.TESTFN, "w") as f:
            f.write("egg\n")
        with self.open(support.TESTFN, "r") as f:
            # Borrow the fd without taking ownership of it.
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.read(), "egg\n")
            file.seek(0)
            file.close()
            self.assertRaises(ValueError, file.read)
    def test_no_closefd_with_filename(self):
        # can't use closefd in combination with a file name
        self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
    def test_closefd_attr(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"egg\n")
        with self.open(support.TESTFN, "r") as f:
            self.assertEqual(f.buffer.raw.closefd, True)
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.buffer.raw.closefd, False)
    def test_garbage_collection(self):
        # FileIO objects are collected, and collecting them flushes
        # all data to disk.
        f = self.FileIO(support.TESTFN, "wb")
        f.write(b"abcxxx")
        # Create a reference cycle so only the cyclic GC can reclaim f.
        f.f = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"abcxxx")
    def test_unbounded_file(self):
        # Issue #1174606: reading from an unbounded stream such as /dev/zero.
        zero = "/dev/zero"
        if not os.path.exists(zero):
            self.skipTest("{0} does not exist".format(zero))
        if sys.maxsize > 0x7FFFFFFF:
            self.skipTest("test can only run in a 32-bit address space")
        if support.real_max_memuse < support._2G:
            self.skipTest("test requires at least 2GB of memory")
        with self.open(zero, "rb", buffering=0) as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "rb") as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "r") as f:
            self.assertRaises(OverflowError, f.read)
    def test_flush_error_on_close(self):
        f = self.open(support.TESTFN, "wb", buffering=0)
        def bad_flush():
            raise IOError()
        f.flush = bad_flush
        self.assertRaises(IOError, f.close) # exception not swallowed
    def test_multi_close(self):
        # Closing more than once is a no-op, but later I/O must fail.
        f = self.open(support.TESTFN, "wb", buffering=0)
        f.close()
        f.close()
        f.close()
        self.assertRaises(ValueError, f.flush)
    def test_RawIOBase_read(self):
        # Exercise the default RawIOBase.read() implementation (which calls
        # readinto() internally).
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
        self.assertEqual(rawio.read(2), b"ab")
        self.assertEqual(rawio.read(2), b"c")
        self.assertEqual(rawio.read(2), b"d")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"ef")
        self.assertEqual(rawio.read(2), b"g")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"")
# Runs IOTest against the C implementation (attributes bound by test setup).
class CIOTest(IOTest):
    pass
# Runs IOTest against the pure-Python implementation; test_array_writes is
# skipped because _pyio measures array lengths in elements, not bytes.
class PyIOTest(IOTest):
    test_array_writes = unittest.skip(
        "len(array.array) returns number of elements rather than bytelength"
    )(IOTest.test_array_writes)
class CommonBufferedTests:
    # Tests common to BufferedReader, BufferedWriter and BufferedRandom.
    # Mixin only: subclasses provide self.tp (the buffered type under test)
    # and the Mock*/CloseFailure* raw IO helpers.
    def test_detach(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        self.assertIs(buf.detach(), raw)
        # A second detach on an already-detached buffer must fail.
        self.assertRaises(ValueError, buf.detach)
    def test_fileno(self):
        # fileno() is delegated to the raw object (MockRawIO returns 42).
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertEqual(42, bufio.fileno())
    def test_no_fileno(self):
        # XXX will we always have fileno() function? If so, kill
        # this test. Else, write it.
        pass
    def test_invalid_args(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Invalid whence
        self.assertRaises(ValueError, bufio.seek, 0, -1)
        self.assertRaises(ValueError, bufio.seek, 0, 3)
    def test_override_destructor(self):
        # Destruction must call close() (and, for writable buffers, flush())
        # exactly once, even with overridden methods.
        tp = self.tp
        record = []
        class MyBufferedIO(tp):
            def __del__(self):
                record.append(1)
                try:
                    f = super(MyBufferedIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super(MyBufferedIO, self).close()
            def flush(self):
                record.append(3)
                super(MyBufferedIO, self).flush()
        rawio = self.MockRawIO()
        bufio = MyBufferedIO(rawio)
        writable = bufio.writable()
        del bufio
        support.gc_collect()
        if writable:
            self.assertEqual(record, [1, 2, 3])
        else:
            self.assertEqual(record, [1, 2])
    def test_context_manager(self):
        # Test usability as a context manager
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        def _with():
            with bufio:
                pass
        _with()
        # bufio should now be closed, and using it a second time should raise
        # a ValueError.
        self.assertRaises(ValueError, _with)
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.tp(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception IOError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)
    def test_repr(self):
        # repr() includes the raw object's name attribute when present.
        raw = self.MockRawIO()
        b = self.tp(raw)
        clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
        self.assertEqual(repr(b), "<%s>" % clsname)
        raw.name = "dummy"
        self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
        raw.name = b"dummy"
        self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
    def test_flush_error_on_close(self):
        raw = self.MockRawIO()
        def bad_flush():
            raise IOError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(IOError, b.close) # exception not swallowed
    def test_multi_close(self):
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)
    def test_readonly_attributes(self):
        # The .raw attribute of a buffered object must not be reassignable.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        x = self.MockRawIO()
        with self.assertRaises((AttributeError, TypeError)):
            buf.raw = x
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
    """Tests for BufferedReader; C/Py subclasses below bind self.tp."""
    read_mode = "rb"
    def test_constructor(self):
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        # Re-running __init__ on an existing object must be allowed and must
        # reset the buffer.
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        rawio = self.MockRawIO([b"abc"])
        # A failed __init__ must not leave the object unusable.
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())
    def test_read(self):
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)
    def test_read1(self):
        # read1() must perform at most one raw read; _reads counts them.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)
        # Invalid args
        self.assertRaises(ValueError, bufio.read1, -1)
    def test_readinto(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        # Short read at EOF leaves the unread tail of b untouched.
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
    def test_readlines(self):
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        # With a size hint, reading stops after the line that crosses it.
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
    def test_buffering(self):
        data = b"abcdefghi"
        dlen = len(data)
        # Each entry: [buffer size, buffered read sizes, expected raw reads].
        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3],     [ dlen ]    ],
            [ 4,   [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]
        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)
    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        # With nothing buffered and a would-block raw read, read() -> None.
        self.assertIsNone(bufio.read())
        self.assertEqual(b"", bufio.read())
        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())
    def test_read_past_eof(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read(9000))
    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                for t in threads:
                    t.start()
                time.sleep(0.02) # yield
                for t in threads:
                    t.join()
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)
    def test_misbehaved_io(self):
        # Bogus raw return values must surface as IOError, not corruption.
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(IOError, bufio.seek, 0)
        self.assertRaises(IOError, bufio.tell)
    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest):
    """BufferedReaderTest against the C implementation, plus C-only checks."""
    tp = io.BufferedReader
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        # After a failed re-__init__, the object must refuse further use.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)
    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(IOError, bufio.read, 10)
    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        rawio = self.FileIO(support.TESTFN, "w+b")
        f = self.tp(rawio)
        # Reference cycle forces collection through the cyclic GC.
        f.f = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
# BufferedReaderTest against the pure-Python implementation.
class PyBufferedReaderTest(BufferedReaderTest):
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
    """Tests for BufferedWriter; C/Py subclasses below bind self.tp."""
    write_mode = "wb"
    def test_constructor(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Re-running __init__ must be allowed and must reset the buffer.
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(3, bufio.write(b"abc"))
        bufio.flush()
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        # A failed __init__ must not leave the object unusable.
        bufio.__init__(rawio)
        self.assertEqual(3, bufio.write(b"ghi"))
        bufio.flush()
        self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
    def test_detach_flush(self):
        # detach() must flush pending buffered data to the raw stream.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        buf.write(b"howdy!")
        self.assertFalse(raw._write_stack)
        buf.detach()
        self.assertEqual(raw._write_stack, [b"howdy!"])
    def test_write(self):
        # Write to the buffered IO but don't overflow the buffer.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        self.assertFalse(writer._write_stack)
    def test_write_overflow(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        contents = b"abcdefghijklmnop"
        for n in range(0, len(contents), 3):
            bufio.write(contents[n:n+3])
        flushed = b"".join(writer._write_stack)
        # At least (total - 8) bytes were implicitly flushed, perhaps more
        # depending on the implementation.
        self.assertTrue(flushed.startswith(contents[:-8]), flushed)
    def check_writes(self, intermediate_func):
        # Lots of writes, test the flushed output is as expected.
        contents = bytes(range(256)) * 1000
        n = 0
        writer = self.MockRawIO()
        bufio = self.tp(writer, 13)
        # Generator of write sizes: repeat each N 15 times then proceed to N+1
        def gen_sizes():
            for size in count(1):
                for i in range(15):
                    yield size
        sizes = gen_sizes()
        while n < len(contents):
            size = min(next(sizes), len(contents) - n)
            self.assertEqual(bufio.write(contents[n:n+size]), size)
            # Hook for interleaving flush/seek/truncate between writes.
            intermediate_func(bufio)
            n += size
        bufio.flush()
        self.assertEqual(contents,
            b"".join(writer._write_stack))
    def test_writes(self):
        self.check_writes(lambda bufio: None)
    def test_writes_and_flushes(self):
        self.check_writes(lambda bufio: bufio.flush())
    def test_writes_and_seeks(self):
        def _seekabs(bufio):
            pos = bufio.tell()
            bufio.seek(pos + 1, 0)
            bufio.seek(pos - 1, 0)
            bufio.seek(pos, 0)
        self.check_writes(_seekabs)
        def _seekrel(bufio):
            pos = bufio.seek(0, 1)
            bufio.seek(+1, 1)
            bufio.seek(-1, 1)
            bufio.seek(pos, 0)
        self.check_writes(_seekrel)
    def test_writes_and_truncates(self):
        self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
    def test_write_non_blocking(self):
        raw = self.MockNonBlockWriterIO()
        bufio = self.tp(raw, 8)
        self.assertEqual(bufio.write(b"abcd"), 4)
        self.assertEqual(bufio.write(b"efghi"), 5)
        # 1 byte will be written, the rest will be buffered
        raw.block_on(b"k")
        self.assertEqual(bufio.write(b"jklmn"), 5)
        # 8 bytes will be written, 8 will be buffered and the rest will be lost
        raw.block_on(b"0")
        try:
            bufio.write(b"opqrwxyz0123456789")
        except self.BlockingIOError as e:
            written = e.characters_written
        else:
            self.fail("BlockingIOError should have been raised")
        self.assertEqual(written, 16)
        self.assertEqual(raw.pop_written(),
            b"abcdefghijklmnopqrwxyz")
        self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
        s = raw.pop_written()
        # Previously buffered bytes were flushed
        self.assertTrue(s.startswith(b"01234567A"), s)
    def test_write_and_rewind(self):
        raw = io.BytesIO()
        bufio = self.tp(raw, 4)
        self.assertEqual(bufio.write(b"abcdef"), 6)
        self.assertEqual(bufio.tell(), 6)
        bufio.seek(0, 0)
        self.assertEqual(bufio.write(b"XY"), 2)
        bufio.seek(6, 0)
        self.assertEqual(raw.getvalue(), b"XYcdef")
        self.assertEqual(bufio.write(b"123456"), 6)
        bufio.flush()
        self.assertEqual(raw.getvalue(), b"XYcdef123456")
    def test_flush(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        bufio.flush()
        self.assertEqual(b"abc", writer._write_stack[0])
    def test_destructor(self):
        # Dropping the last reference must flush buffered data.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        del bufio
        support.gc_collect()
        self.assertEqual(b"abc", writer._write_stack[0])
    def test_truncate(self):
        # Truncate implicitly flushes the buffer.
        with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            bufio.write(b"abcdef")
            self.assertEqual(bufio.truncate(3), 3)
            self.assertEqual(bufio.tell(), 6)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.read(), b"abc")
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes from many threads and test they were
            # all flushed.
            N = 1000
            contents = bytes(range(256)) * N
            sizes = cycle([1, 19])
            n = 0
            queue = deque()
            while n < len(contents):
                size = next(sizes)
                queue.append(contents[n:n+size])
                n += size
            del contents
            # We use a real file object because it allows us to
            # exercise situations where the GIL is released before
            # writing the buffer to the raw streams. This is in addition
            # to concurrency issues due to switching threads in the middle
            # of Python code.
            with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                def f():
                    try:
                        while True:
                            try:
                                s = queue.popleft()
                            except IndexError:
                                return
                            bufio.write(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                for t in threads:
                    t.start()
                time.sleep(0.02) # yield
                for t in threads:
                    t.join()
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                bufio.close()
            with self.open(support.TESTFN, "rb") as f:
                s = f.read()
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
        finally:
            support.unlink(support.TESTFN)
    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO()
        bufio = self.tp(rawio, 5)
        self.assertRaises(IOError, bufio.seek, 0)
        self.assertRaises(IOError, bufio.tell)
        self.assertRaises(IOError, bufio.write, b"abcdef")
    def test_max_buffer_size_deprecation(self):
        # Third positional argument (max_buffer_size) is deprecated.
        with support.check_warnings(("max_buffer_size is deprecated",
                                     DeprecationWarning)):
            self.tp(self.MockRawIO(), 8, 12)
class CBufferedWriterTest(BufferedWriterTest):
    """BufferedWriterTest against the C implementation, plus C-only checks."""
    tp = io.BufferedWriter
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        # After a failed re-__init__, the object must refuse further use.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")
    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        rawio = self.FileIO(support.TESTFN, "w+b")
        f = self.tp(rawio)
        f.write(b"123xxx")
        # Reference cycle forces collection through the cyclic GC.
        f.x = f
        wr = weakref.ref(f)
        del f
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")
# BufferedWriterTest against the pure-Python implementation.
class PyBufferedWriterTest(BufferedWriterTest):
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    """Tests for BufferedRWPair, which couples one reader and one writer.

    The concrete class under test is provided by subclasses via the ``tp``
    attribute; helpers like MockRawIO/BytesIO are injected by the driver.
    """
    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
    def test_detach(self):
        # A pair has no single underlying raw stream, so detach() is invalid.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)
    def test_constructor_max_buffer_size_deprecation(self):
        # Passing max_buffer_size (4th argument) must emit a DeprecationWarning.
        with support.check_warnings(("max_buffer_size is deprecated",
                                     DeprecationWarning)):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")
    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
    def test_readinto(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        data = bytearray(5)
        self.assertEqual(pair.readinto(data), 5)
        self.assertEqual(data, b"abcde")
    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        pair.write(b"def")
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])
    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        # peek() must not advance the position.
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")
    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())
    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())
    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())
    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.
    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)
    def test_isatty(self):
        # The pair is a tty if either end is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
    """Run BufferedRWPairTest against the C implementation (_io)."""
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    """Run BufferedRWPairTest against the pure-Python implementation (_pyio)."""
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Tests for buffered objects opened for both reading and writing.

    Inherits both the reader and writer suites; ``tp`` (the class under
    test) is supplied by the C/Python subclasses below.
    """
    read_mode = "rb+"
    write_mode = "wb+"
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)
    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack) # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])
    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"asdf")
        rw.seek(0, 0)
        self.assertEqual(b"asdfasdfl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        # seek() must reject a float offset.
        self.assertRaises(TypeError, rw.seek, 0.0)
    def check_flush_and_read(self, read_func):
        # Interleave writes with reads done through read_func, checking that
        # flush() does not disturb the logical read position.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))
    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)
    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)
    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())
    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)
    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)
    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)
    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)
    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)
    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')
        b = b"\x80\x81\x82\x83\x84"
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))
    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)
    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest, BufferedRandomTest):
    """BufferedRandomTest variants specific to the C implementation (_io)."""
    tp = io.BufferedRandom
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            # A sys.maxsize-sized buffer must raise cleanly, not crash.
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_garbage_collection(self):
        # Re-run both C-specific GC tests against the read/write object.
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)
class PyBufferedRandomTest(BufferedRandomTest):
    """Run BufferedRandomTest against the pure-Python implementation (_pyio)."""
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    A stateful, buffering decoder used to exercise seek/tell behavior.

    The input is a sequence of words.  Words are fixed-length (their length
    given by the current input-length setting I) or, when I == 0,
    variable-length and terminated by a period (extra periods are ignored).
    Control words:
      - 'i' followed by a number sets the input length I (maximum 99).
      - 'o' followed by a number sets the output length O (maximum 99).
    Any other word is echoed, padded with hyphens and truncated to length O
    (emitted verbatim when O == 0), followed by a period.  I and O both
    start at 1.  EOF (final=True) flushes the last buffered word.
    """
    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()
    def __repr__(self):
        return '<SID %x>' % id(self)
    def reset(self):
        # Both length settings default to 1; the buffer holds the
        # not-yet-complete word.
        self.i = 1
        self.o = 1
        self.buffer = bytearray()
    def getstate(self):
        # XOR with 1 so that a freshly reset decoder reports flags == 0.
        i, o = self.i ^ 1, self.o ^ 1
        return bytes(self.buffer), i*100 + o
    def setstate(self, state):
        # Inverse of getstate(): restore the buffer and both lengths.
        pending, flags = state
        self.buffer = bytearray(pending)
        i, o = divmod(flags, 100)
        self.i, self.o = i ^ 1, o ^ 1
    def decode(self, input, final=False):
        decoded = ''
        for b in input:
            if self.i:
                # Fixed-length mode: a word completes every self.i bytes.
                self.buffer.append(b)
                if len(self.buffer) == self.i:
                    decoded += self.process_word()
            elif b == '.':
                # Variable-length mode: a period ends the current word;
                # periods with nothing buffered are ignored.
                if self.buffer:
                    decoded += self.process_word()
            else:
                self.buffer.append(b)
        if final and self.buffer:
            # EOF flushes the last, possibly short, word.
            decoded += self.process_word()
        return decoded
    def process_word(self):
        # Consume self.buffer as one word and return its decoded output
        # ('' for the 'i'/'o' control words, which only update state).
        result = ''
        marker = self.buffer[0]
        if marker == ord('i'):
            self.i = min(99, int(self.buffer[1:] or 0))    # set input length
        elif marker == ord('o'):
            self.o = min(99, int(self.buffer[1:] or 0))    # set output length
        else:
            result = self.buffer.decode('ascii')
            if len(result) < self.o:
                result += '-'*self.o    # pad out with hyphens
            if self.o:
                result = result[:self.o]    # truncate to output length
            result += '.'
        self.buffer = bytearray()
        return result
    # Disabled by default; tests flip this on to activate the codec lookup.
    codecEnabled = False
    @classmethod
    def lookupTestDecoder(cls, name):
        # codecs.register() hook: answer only for 'test_decoder', and only
        # while codecEnabled is set, so other tests are unaffected.
        if cls.codecEnabled and name == 'test_decoder':
            latin1 = codecs.lookup('latin-1')
            return codecs.CodecInfo(
                name='test_decoder', encode=latin1.encode, decode=None,
                incrementalencoder=None,
                streamreader=None, streamwriter=None,
                incrementaldecoder=cls)
# Register the test decoder's lookup hook with the codecs machinery.
# It stays inert until a test sets StatefulIncrementalDecoder.codecEnabled.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Sanity-check StatefulIncrementalDecoder itself before the seek/tell
    tests rely on it.
    """
    # Each entry is (encoded input, final flag, expected decoded output).
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
            'a----------------------------.' +
            'b----------------------------.' +
            'cde--------------------------.' +
            'abcdefghijabcde.' +
            'a.b------------.' +
            '.c.------------.' +
            'd.e------------.' +
            'k--------------.' +
            'l--------------.' +
            'm--------------.')
    ]
    def test_decoder(self):
        # Run each one-shot case through a fresh decoder instance.
        for encoded, at_eof, expected in self.test_cases:
            self.assertEqual(
                StatefulIncrementalDecoder().decode(encoded, at_eof), expected)
        # An unfinished decode buffers everything; forcing EOF flushes it.
        decoder = StatefulIncrementalDecoder()
        self.assertEqual(decoder.decode(b'oiabcd'), '')
        self.assertEqual(decoder.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
    """Behavioral tests for TextIOWrapper: encodings, newline translation,
    seek/tell cookies, destructors and thread safety.

    The concrete classes under test (TextIOWrapper, BufferedReader, BytesIO,
    open, ...) are injected as class attributes by the test driver, so the
    same suite runs against both the C (_io) and Python (_pyio) versions.
    """
    def setUp(self):
        # Sample data mixing \r\n, \r and \n line endings, plus its
        # universal-newlines-normalized text form.
        self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
        self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
        support.unlink(support.TESTFN)
    def tearDown(self):
        support.unlink(support.TESTFN)
    def test_constructor(self):
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        # Re-running __init__ must fully re-configure the wrapper.
        t.__init__(b, encoding="latin1", newline="\r\n")
        self.assertEqual(t.encoding, "latin1")
        self.assertEqual(t.line_buffering, False)
        t.__init__(b, encoding="utf8", line_buffering=True)
        self.assertEqual(t.encoding, "utf8")
        self.assertEqual(t.line_buffering, True)
        self.assertEqual("\xe9\n", t.readline())
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
    def test_detach(self):
        r = self.BytesIO()
        b = self.BufferedWriter(r)
        t = self.TextIOWrapper(b)
        self.assertIs(t.detach(), b)
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("howdy")
        # detach() must flush pending data; afterwards the wrapper is dead.
        self.assertFalse(r.getvalue())
        t.detach()
        self.assertEqual(r.getvalue(), b"howdy")
        self.assertRaises(ValueError, t.detach)
    def test_repr(self):
        raw = self.BytesIO("hello".encode("utf-8"))
        b = self.BufferedReader(raw)
        t = self.TextIOWrapper(b, encoding="utf-8")
        modname = self.TextIOWrapper.__module__
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper encoding='utf-8'>" % modname)
        raw.name = "dummy"
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
        raw.name = b"dummy"
        self.assertEqual(repr(t),
                         "<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
    def test_line_buffering(self):
        r = self.BytesIO()
        b = self.BufferedWriter(r, 1000)
        t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
        t.write("X")
        self.assertEqual(r.getvalue(), b"")  # No flush happened
        t.write("Y\nZ")
        self.assertEqual(r.getvalue(), b"XY\nZ")  # All got flushed
        t.write("A\rB")
        # A bare \r also triggers a line-buffered flush.
        self.assertEqual(r.getvalue(), b"XY\nZA\rB")
    def test_encoding(self):
        # Check the encoding attribute is always set, and valid
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="utf8")
        self.assertEqual(t.encoding, "utf8")
        t = self.TextIOWrapper(b)
        self.assertTrue(t.encoding is not None)
        codecs.lookup(t.encoding)
    def test_encoding_errors_reading(self):
        # (1) default
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii")
        self.assertRaises(UnicodeError, t.read)
        # (2) explicit strict
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
        self.assertRaises(UnicodeError, t.read)
        # (3) ignore
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
        self.assertEqual(t.read(), "abc\n\n")
        # (4) replace
        b = self.BytesIO(b"abc\n\xff\n")
        t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
        self.assertEqual(t.read(), "abc\n\ufffd\n")
    def test_encoding_errors_writing(self):
        # (1) default
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii")
        self.assertRaises(UnicodeError, t.write, "\xff")
        # (2) explicit strict
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
        self.assertRaises(UnicodeError, t.write, "\xff")
        # (3) ignore
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(b.getvalue(), b"abcdef\n")
        # (4) replace
        b = self.BytesIO()
        t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
                               newline="\n")
        t.write("abc\xffdef\n")
        t.flush()
        self.assertEqual(b.getvalue(), b"abc?def\n")
    def test_newlines(self):
        # Exercise every newline mode against every encoding, reading either
        # whole lines or in 2-char chunks, over a range of buffer sizes.
        input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
        tests = [
            [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
            [ '', input_lines ],
            [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
            [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
            [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
        ]
        encodings = (
            'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        # Try a range of buffer sizes to test the case where \r is the last
        # character in TextIOWrapper._pending_line.
        for encoding in encodings:
            # XXX: str.encode() should return bytes
            data = bytes(''.join(input_lines).encode(encoding))
            for do_reads in (False, True):
                for bufsize in range(1, 10):
                    for newline, exp_lines in tests:
                        bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                        textio = self.TextIOWrapper(bufio, newline=newline,
                                                    encoding=encoding)
                        if do_reads:
                            got_lines = []
                            while True:
                                c2 = textio.read(2)
                                if c2 == '':
                                    break
                                self.assertEqual(len(c2), 2)
                                got_lines.append(c2 + textio.readline())
                        else:
                            got_lines = list(textio)
                        for got_line, exp_line in zip(got_lines, exp_lines):
                            self.assertEqual(got_line, exp_line)
                        self.assertEqual(len(got_lines), len(exp_lines))
    def test_newlines_input(self):
        testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
        normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
        for newline, expected in [
            (None, normalized.decode("ascii").splitlines(True)),
            ("", testdata.decode("ascii").splitlines(True)),
            ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
            ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
            ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
            ]:
            buf = self.BytesIO(testdata)
            txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
            self.assertEqual(txt.readlines(), expected)
            txt.seek(0)
            self.assertEqual(txt.read(), "".join(expected))
    def test_newlines_output(self):
        testdict = {
            "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
            "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
            "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
            "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
            }
        tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
        for newline, expected in tests:
            buf = self.BytesIO()
            txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
            txt.write("AAA\nB")
            txt.write("BB\nCCC\n")
            txt.write("X\rY\r\nZ")
            txt.flush()
            self.assertEqual(buf.closed, False)
            self.assertEqual(buf.getvalue(), expected)
    def test_destructor(self):
        # Dropping the last reference must flush and close the raw stream.
        l = []
        base = self.BytesIO
        class MyBytesIO(base):
            def close(self):
                l.append(self.getvalue())
                base.close(self)
        b = MyBytesIO()
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("abc")
        del t
        support.gc_collect()
        self.assertEqual([b"abc"], l)
    def test_override_destructor(self):
        # Subclass __del__ must still run flush() then close() (in order).
        record = []
        class MyTextIO(self.TextIOWrapper):
            def __del__(self):
                record.append(1)
                try:
                    f = super(MyTextIO, self).__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super(MyTextIO, self).close()
            def flush(self):
                record.append(3)
                super(MyTextIO, self).flush()
        b = self.BytesIO()
        t = MyTextIO(b, encoding="ascii")
        del t
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.TextIOWrapper(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception IOError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)
    # Systematic tests of the text I/O API
    def test_basic_io(self):
        for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
                f = self.open(support.TESTFN, "w+", encoding=enc)
                f._CHUNK_SIZE = chunksize
                self.assertEqual(f.write("abc"), 3)
                f.close()
                f = self.open(support.TESTFN, "r+", encoding=enc)
                f._CHUNK_SIZE = chunksize
                self.assertEqual(f.tell(), 0)
                self.assertEqual(f.read(), "abc")
                cookie = f.tell()
                self.assertEqual(f.seek(0), 0)
                self.assertEqual(f.read(None), "abc")
                f.seek(0)
                self.assertEqual(f.read(2), "ab")
                self.assertEqual(f.read(1), "c")
                self.assertEqual(f.read(1), "")
                self.assertEqual(f.read(), "")
                self.assertEqual(f.tell(), cookie)
                self.assertEqual(f.seek(0), 0)
                self.assertEqual(f.seek(0, 2), cookie)
                self.assertEqual(f.write("def"), 3)
                self.assertEqual(f.seek(cookie), cookie)
                self.assertEqual(f.read(), "def")
                if enc.startswith("utf"):
                    self.multi_line_test(f, enc)
                f.close()
    def multi_line_test(self, f, enc):
        # Write many lines of varying length through *f*, then check that
        # readline() returns them with the same tell() positions.
        f.seek(0)
        f.truncate()
        sample = "s\xff\u0fff\uffff"
        wlines = []
        for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
            chars = []
            for i in range(size):
                chars.append(sample[i % len(sample)])
            line = "".join(chars) + "\n"
            wlines.append((f.tell(), line))
            f.write(line)
        f.seek(0)
        rlines = []
        while True:
            pos = f.tell()
            line = f.readline()
            if not line:
                break
            rlines.append((pos, line))
        self.assertEqual(rlines, wlines)
    def test_telling(self):
        f = self.open(support.TESTFN, "w+", encoding="utf8")
        p0 = f.tell()
        f.write("\xff\n")
        p1 = f.tell()
        f.write("\xff\n")
        p2 = f.tell()
        f.seek(0)
        self.assertEqual(f.tell(), p0)
        self.assertEqual(f.readline(), "\xff\n")
        self.assertEqual(f.tell(), p1)
        self.assertEqual(f.readline(), "\xff\n")
        self.assertEqual(f.tell(), p2)
        f.seek(0)
        # tell() is not allowed while iterating the file.
        for line in f:
            self.assertEqual(line, "\xff\n")
            self.assertRaises(IOError, f.tell)
        self.assertEqual(f.tell(), p2)
        f.close()
    def test_seeking(self):
        # Read exactly up to a multibyte character straddling a chunk edge.
        chunk_size = _default_chunk_size()
        prefix_size = chunk_size - 2
        u_prefix = "a" * prefix_size
        prefix = bytes(u_prefix.encode("utf-8"))
        self.assertEqual(len(u_prefix), len(prefix))
        u_suffix = "\u8888\n"
        suffix = bytes(u_suffix.encode("utf-8"))
        line = prefix + suffix
        f = self.open(support.TESTFN, "wb")
        f.write(line*2)
        f.close()
        f = self.open(support.TESTFN, "r", encoding="utf-8")
        s = f.read(prefix_size)
        self.assertEqual(s, prefix.decode("ascii"))
        self.assertEqual(f.tell(), prefix_size)
        self.assertEqual(f.readline(), u_suffix)
    def test_seeking_too(self):
        # Regression test for a specific bug
        data = b'\xe0\xbf\xbf\n'
        f = self.open(support.TESTFN, "wb")
        f.write(data)
        f.close()
        f = self.open(support.TESTFN, "r", encoding="utf-8")
        f._CHUNK_SIZE  # Just test that it exists
        f._CHUNK_SIZE = 2
        f.readline()
        f.tell()
    def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
        # Make test faster by doing smaller seeks
        CHUNK_SIZE = 128
        def test_seek_and_tell_with_data(data, min_pos=0):
            """Tell/seek to various points within a data stream and ensure
            that the decoded data returned by read() is consistent."""
            f = self.open(support.TESTFN, 'wb')
            f.write(data)
            f.close()
            f = self.open(support.TESTFN, encoding='test_decoder')
            f._CHUNK_SIZE = CHUNK_SIZE
            decoded = f.read()
            f.close()
            for i in range(min_pos, len(decoded) + 1): # seek positions
                for j in [1, 5, len(decoded) - i]: # read lengths
                    f = self.open(support.TESTFN, encoding='test_decoder')
                    self.assertEqual(f.read(i), decoded[:i])
                    cookie = f.tell()
                    self.assertEqual(f.read(j), decoded[i:i + j])
                    f.seek(cookie)
                    self.assertEqual(f.read(), decoded[i:])
                    f.close()
        # Enable the test decoder.
        StatefulIncrementalDecoder.codecEnabled = 1
        # Run the tests.
        try:
            # Try each test case.
            for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
                test_seek_and_tell_with_data(input)
            # Position each test case so that it crosses a chunk boundary.
            for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
                offset = CHUNK_SIZE - len(input)//2
                prefix = b'.'*offset
                # Don't bother seeking into the prefix (takes too long).
                min_pos = offset*2
                test_seek_and_tell_with_data(prefix + input, min_pos)
        # Ensure our test decoder won't interfere with subsequent tests.
        finally:
            StatefulIncrementalDecoder.codecEnabled = 0
    def test_encoded_writes(self):
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        for encoding in tests:
            buf = self.BytesIO()
            f = self.TextIOWrapper(buf, encoding=encoding)
            # Check if the BOM is written only once (see issue1753).
            f.write(data)
            f.write(data)
            f.seek(0)
            self.assertEqual(f.read(), data * 2)
            f.seek(0)
            self.assertEqual(f.read(), data * 2)
            self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
    def test_unreadable(self):
        class UnReadable(self.BytesIO):
            def readable(self):
                return False
        txt = self.TextIOWrapper(UnReadable())
        self.assertRaises(IOError, txt.read)
    def test_read_one_by_one(self):
        txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
        reads = ""
        while True:
            c = txt.read(1)
            if not c:
                break
            reads += c
        self.assertEqual(reads, "AA\nBB")
    def test_readlines(self):
        txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
        self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
        txt.seek(0)
        self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
        txt.seek(0)
        self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
    # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
    def test_read_by_chunk(self):
        # make sure "\r\n" straddles 128 char boundary.
        txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
        reads = ""
        while True:
            c = txt.read(128)
            if not c:
                break
            reads += c
        self.assertEqual(reads, "A"*127+"\nB")
    def test_issue1395_1(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        # read one char at a time
        reads = ""
        while True:
            c = txt.read(1)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)
    def test_issue1395_2(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = ""
        while True:
            c = txt.read(4)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)
    def test_issue1395_3(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        reads += txt.read(4)
        reads += txt.readline()
        reads += txt.readline()
        reads += txt.readline()
        self.assertEqual(reads, self.normalized)
    def test_issue1395_4(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        reads += txt.read()
        self.assertEqual(reads, self.normalized)
    def test_issue1395_5(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        pos = txt.tell()
        txt.seek(0)
        txt.seek(pos)
        self.assertEqual(txt.read(4), "BBB\n")
    def test_issue2282(self):
        buffer = self.BytesIO(self.testdata)
        txt = self.TextIOWrapper(buffer, encoding="ascii")
        self.assertEqual(buffer.seekable(), txt.seekable())
    def test_append_bom(self):
        # The BOM is not written again when appending to a non-empty file
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                pos = f.tell()
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaa'.encode(charset))
            with self.open(filename, 'a', encoding=charset) as f:
                f.write('xxx')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
    def test_seek_bom(self):
        # Same test, but when seeking manually
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                pos = f.tell()
            with self.open(filename, 'r+', encoding=charset) as f:
                f.seek(pos)
                f.write('zzz')
                f.seek(0)
                f.write('bbb')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
    def test_errors_property(self):
        with self.open(support.TESTFN, "w") as f:
            self.assertEqual(f.errors, "strict")
        with self.open(support.TESTFN, "w", errors="replace") as f:
            self.assertEqual(f.errors, "replace")
    @unittest.skipUnless(threading, 'Threading required for this test.')
    def test_threads_write(self):
        # Issue6750: concurrent writes could duplicate data
        event = threading.Event()
        with self.open(support.TESTFN, "w", buffering=1) as f:
            def run(n):
                text = "Thread%03d\n" % n
                event.wait()
                f.write(text)
            threads = [threading.Thread(target=lambda n=x: run(n))
                       for x in range(20)]
            for t in threads:
                t.start()
            time.sleep(0.02)
            event.set()
            for t in threads:
                t.join()
        with self.open(support.TESTFN) as f:
            content = f.read()
            # Each thread's line must appear exactly once.
            for n in range(20):
                self.assertEqual(content.count("Thread%03d\n" % n), 1)
    def test_flush_error_on_close(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        def bad_flush():
            raise IOError()
        txt.flush = bad_flush
        self.assertRaises(IOError, txt.close) # exception not swallowed
    def test_multi_close(self):
        # close() must be idempotent; flush() after close must fail.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt.close()
        txt.close()
        txt.close()
        self.assertRaises(ValueError, txt.flush)
    def test_readonly_attributes(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        buf = self.BytesIO(self.testdata)
        with self.assertRaises((AttributeError, TypeError)):
            txt.buffer = buf
class CTextIOWrapperTest(TextIOWrapperTest):
    """TextIOWrapperTest variants specific to the C implementation (_io)."""
    def test_initialization(self):
        # A failed re-__init__ must leave the wrapper unusable (read raises).
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.read)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)
    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        rawio = io.FileIO(support.TESTFN, "wb")
        b = self.BufferedWriter(rawio)
        t = self.TextIOWrapper(b, encoding="ascii")
        t.write("456def")
        t.x = t  # reference cycle, so only the GC can reclaim the object
        wr = weakref.ref(t)
        del t
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")
class PyTextIOWrapperTest(TextIOWrapperTest):
    # The pure-Python (_pyio) run needs no extra cases; the driver injects
    # the _pyio classes as attributes on this subclass.
    pass
class IncrementalNewlineDecoderTest(unittest.TestCase):
    """Tests for IncrementalNewlineDecoder: byte-at-a-time decoding,
    universal-newline translation, the `newlines` attribute, and
    getstate()/setstate() round-trips.
    """
    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)
        # Feed a 3-byte UTF-8 sequence one byte at a time.
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")
        # A truncated sequence at EOF must raise.
        _check_decode(b'\xe8', "")
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
        decoder.reset()
        # \r handling: a lone \r is held back until the next byte shows
        # whether it starts a \r\n pair.
        _check_decode(b'\n', "\n")
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)
        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")
        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")
        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")
    def check_newline_decoding(self, decoder, encoding):
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(b))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        # `newlines` accumulates the kinds of line endings seen so far.
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        # reset() must clear the newline history as well.
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        self.assertEqual(decoder.newlines, None)
    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)
    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        def _check(dec):
            # Characters that merely *end* in the \r or \n code unit must not
            # be mistaken for newlines.
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # C implementation variant; namespace injected by test_main().
    pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Pure-Python (_pyio) variant; namespace injected by test_main().
    pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
    """Miscellaneous io-module tests: public API surface, file-object
    attributes, behaviour after close, BlockingIOError, and ABC wiring."""
    def tearDown(self):
        support.unlink(support.TESTFN)
    def test___all__(self):
        # Every name in __all__ must exist; error classes must be exceptions,
        # everything else (except open and SEEK_* constants) an IOBase subclass.
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertTrue(obj is not None, name)
            if name == "open":
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))
    def test_attributes(self):
        # mode/name must be reported at every layer of the IO stack.
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()
        f = self.open(support.TESTFN, "U")
        self.assertEqual(f.name, support.TESTFN)
        self.assertEqual(f.buffer.name, support.TESTFN)
        self.assertEqual(f.buffer.raw.name, support.TESTFN)
        self.assertEqual(f.mode, "U")
        self.assertEqual(f.buffer.mode, "rb")
        self.assertEqual(f.buffer.raw.mode, "rb")
        f.close()
        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode, "w+")
        self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")
        # When opened from an fd, name is that fd.
        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode, "wb")
        self.assertEqual(g.raw.mode, "wb")
        self.assertEqual(g.name, f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()
    def test_io_after_close(self):
        # Every operation on a closed file must raise ValueError, for all
        # mode/buffering combinations (raw, buffered, text).
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)
    def test_blockingioerror(self):
        # Various BlockingIOError issues
        self.assertRaises(TypeError, self.BlockingIOError)
        self.assertRaises(TypeError, self.BlockingIOError, 1)
        self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
        self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
        b = self.BlockingIOError(1, "")
        self.assertEqual(b.characters_written, 0)
        # The exception must not keep its message argument alive through a
        # reference cycle (checked via a weakref after gc).
        class C(unicode):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
    def _check_abc_inheritance(self, abcmodule):
        # Raw, buffered and text files must each register with exactly the
        # right ABCs from the given module.
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)
    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)
    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)
class CMiscIOTest(MiscIOTest):
    # Run MiscIOTest against the C-accelerated io module.
    io = io
class PyMiscIOTest(MiscIOTest):
    # Run MiscIOTest against the pure-Python _pyio module.
    io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
    """Tests for signal handling during blocking/partial IO: interrupted
    writes must bubble up handler exceptions, reentrant writes must be
    rejected, and interrupted reads/writes must retry after the handler
    returns.  (The ``interrupterd`` method names preserve a historical
    typo; renaming would change which tests unittest discovers.)"""
    def setUp(self):
        self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
    def tearDown(self):
        signal.signal(signal.SIGALRM, self.oldalrm)
    def alarm_interrupt(self, sig, frame):
        # Deliberately raise ZeroDivisionError from inside the handler.
        1 // 0
    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
        """Check that a partial write, when it gets interrupted, properly
        invokes the signal handler, and bubbles up the exception raised
        in the latter."""
        read_results = []
        def _read():
            s = os.read(r, 1)
            read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        r, w = os.pipe()
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            t.start()
            signal.alarm(1)
            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed above. Since the
            # other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
            self.assertRaises(ZeroDivisionError,
                              wio.write, item * (1024 * 1024))
            t.join()
            # We got one byte, get another one and check that it isn't a
            # repeat of the first one.
            read_results.append(os.read(r, 1))
            self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
        finally:
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and block again.
            try:
                wio.close()
            except IOError as e:
                if e.errno != errno.EBADF:
                    raise
    def test_interrupted_write_unbuffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
    def test_interrupted_write_buffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb")
    def test_interrupted_write_text(self):
        self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
    def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
        """Check that a buffered read, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        def alarm_handler(sig, frame):
            os.write(w, b"bar")
        signal.signal(signal.SIGALRM, alarm_handler)
        try:
            rio = self.io.open(r, **fdopen_kwargs)
            os.write(w, b"foo")
            signal.alarm(1)
            # Expected behaviour:
            # - first raw read() returns partial b"foo"
            # - second raw read() returns EINTR
            # - third raw read() returns b"bar"
            self.assertEqual(decode(rio.read(6)), "foobar")
        finally:
            rio.close()
            os.close(w)
            os.close(r)
    def test_interrupterd_read_retry_buffered(self):
        self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
                                          mode="rb")
    def test_interrupterd_read_retry_text(self):
        self.check_interrupted_read_retry(lambda x: x,
                                          mode="r")
    def check_reentrant_write(self, data, **fdopen_kwargs):
        def on_alarm(*args):
            # Will be called reentrantly from the same thread
            wio.write(data)
            1/0
        signal.signal(signal.SIGALRM, on_alarm)
        r, w = os.pipe()
        wio = self.io.open(w, **fdopen_kwargs)
        try:
            signal.alarm(1)
            # Either the reentrant call to wio.write() fails with RuntimeError,
            # or the signal handler raises ZeroDivisionError.
            with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
                while 1:
                    for i in range(100):
                        wio.write(data)
                        wio.flush()
                    # Make sure the buffer doesn't fill up and block further writes
                    os.read(r, len(data) * 100)
            exc = cm.exception
            if isinstance(exc, RuntimeError):
                self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
        finally:
            wio.close()
            os.close(r)
    def test_reentrant_write_buffered(self):
        self.check_reentrant_write(b"xy", mode="wb")
    def test_reentrant_write_text(self):
        self.check_reentrant_write("xy", mode="w", encoding="ascii")
    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_interrupted_write_retry(self, item, **fdopen_kwargs):
        """Check that a buffered write, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        select = support.import_module("select")
        # A quantity that exceeds the buffer size of an anonymous pipe's
        # write end.
        N = 1024 * 1024
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        # We need a separate thread to read from the pipe and allow the
        # write() to finish. This thread is started after the SIGALRM is
        # received (forcing a first EINTR in write()).
        read_results = []
        write_finished = False
        def _read():
            while not write_finished:
                while r in select.select([r], [], [], 1.0)[0]:
                    s = os.read(r, 1024)
                    read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        def alarm1(sig, frame):
            signal.signal(signal.SIGALRM, alarm2)
            signal.alarm(1)
        def alarm2(sig, frame):
            t.start()
        signal.signal(signal.SIGALRM, alarm1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            signal.alarm(1)
            # Expected behaviour:
            # - first raw write() is partial (because of the limited pipe buffer
            #   and the first alarm)
            # - second raw write() returns EINTR (because of the second alarm)
            # - subsequent write()s are successful (either partial or complete)
            self.assertEqual(N, wio.write(item * N))
            wio.flush()
            write_finished = True
            t.join()
            self.assertEqual(N, sum(len(x) for x in read_results))
        finally:
            write_finished = True
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and could block (in case of failure).
            try:
                wio.close()
            except IOError as e:
                if e.errno != errno.EBADF:
                    raise
    def test_interrupterd_write_retry_buffered(self):
        self.check_interrupted_write_retry(b"x", mode="wb")
    def test_interrupterd_write_retry_text(self):
        self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
    # Run SignalsTest against the C-accelerated io module.
    io = io
class PySignalsTest(SignalsTest):
    io = pyio
    # Handling reentrancy issues would slow down _pyio even more, so the
    # tests are disabled.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None
def test_main():
    """Inject the C and pure-Python io namespaces (plus the mock classes)
    into each test class, then run the whole suite."""
    tests = (CIOTest, PyIOTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )
    # Put the namespaces of the IO module we are testing and some useful mock
    # classes in the __dict__ of each test.
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockRawIOWithoutRead)
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    c_io_ns = {name: getattr(io, name) for name in all_members}
    py_io_ns = {name: getattr(pyio, name) for name in all_members}
    globs = globals()
    c_io_ns.update((mock.__name__, globs["C" + mock.__name__]) for mock in mocks)
    py_io_ns.update((mock.__name__, globs["Py" + mock.__name__]) for mock in mocks)
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    for test in tests:
        if test.__name__.startswith("C"):
            namespace = c_io_ns
        elif test.__name__.startswith("Py"):
            namespace = py_io_ns
        else:
            # e.g. StatefulIncrementalDecoderTest needs no injection
            namespace = None
        if namespace is not None:
            for name, obj in namespace.items():
                setattr(test, name, obj)
    support.run_unittest(*tests)
# Allow running this test file directly from the command line.
if __name__ == "__main__":
    test_main()
|
ktan2020/legacy-automation
|
win/Lib/test/test_io.py
|
Python
|
mit
| 105,230
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.permissions
import frappe.async
from frappe import _
from frappe.utils.csvutils import getlink
from frappe.utils.dateutils import parse_date
from frappe.utils import cint, cstr, flt
from frappe.core.page.data_import_tool.data_import_tool import get_data_keys
@frappe.async.handler
def upload(rows = None, submit_after_import=None, ignore_encoding_errors=False, overwrite=None,
	ignore_links=False, pre_process=None):
	"""Import CSV rows (``rows`` argument or the uploaded file in the
	request) as documents of the doctype named in the template header.

	Returns ``{"messages": [...], "error": bool}``; the whole DB
	transaction is rolled back if any row failed, committed otherwise.
	"""
	frappe.flags.mute_emails = True
	# extra input params
	params = json.loads(frappe.form_dict.get("params") or '{}')
	if params.get("submit_after_import"):
		submit_after_import = True
	if params.get("ignore_encoding_errors"):
		ignore_encoding_errors = True
	from frappe.utils.csvutils import read_csv_content_from_uploaded_file
	# --- helpers below close over rows/header/data and the column maps ---
	def get_data_keys_definition():
		# Marker strings used by the export template (data separator etc.).
		return get_data_keys()
	def bad_template():
		frappe.throw(_("Please do not change the rows above {0}").format(get_data_keys_definition().data_separator))
	def check_data_length():
		# Hard cap on rows per import to bound request time and memory.
		max_rows = 5000
		if not data:
			frappe.throw(_("No data found"))
		elif len(data) > max_rows:
			frappe.throw(_("Only allowed {0} rows in one import").format(max_rows))
	def get_start_row():
		# Index of the first data row (just past the separator marker).
		for i, row in enumerate(rows):
			if row and row[0]==get_data_keys_definition().data_separator:
				return i+1
		bad_template()
	def get_header_row(key):
		return get_header_row_and_idx(key)[0]
	def get_header_row_and_idx(key):
		# ([], -1) signals "header row not present".
		for i, row in enumerate(header):
			if row and row[0]==key:
				return row, i
		return [], -1
	def filter_empty_columns(columns):
		empty_cols = filter(lambda x: x in ("", None), columns)
		if empty_cols:
			if columns[-1*len(empty_cols):] == empty_cols:
				# filter empty columns if they exist at the end
				columns = columns[:-1*len(empty_cols)]
			else:
				# an empty column in the middle makes the map ambiguous
				frappe.msgprint(_("Please make sure that there are no empty columns in the file."),
					raise_exception=1)
		return columns
	def make_column_map():
		# Populate doctypes / column_idx_to_fieldname / column_idx_to_fieldtype
		# from the doctype header row (new-style template only).
		doctype_row, row_idx = get_header_row_and_idx(get_data_keys_definition().doctype)
		if row_idx == -1: # old style
			return
		dt = None
		for i, d in enumerate(doctype_row[1:]):
			if d not in ("~", "-"):
				if d: # value in doctype_row
					# doctype_row[i] is the *previous* column (enumerate
					# runs over doctype_row[1:]).
					if doctype_row[i]==dt:
						# prev column is doctype (in case of parentfield)
						doctype_parentfield[dt] = doctype_row[i+1]
					else:
						dt = d
						doctypes.append(d)
						column_idx_to_fieldname[dt] = {}
						column_idx_to_fieldtype[dt] = {}
			if dt:
				# Fieldname/fieldtype rows sit at fixed offsets (+2 / +4)
				# below the doctype row in the export template.
				column_idx_to_fieldname[dt][i+1] = rows[row_idx + 2][i+1]
				column_idx_to_fieldtype[dt][i+1] = rows[row_idx + 4][i+1]
	def get_doc(start_idx):
		# Assemble one parent document, pulling child-table rows from the
		# following lines until the next non-empty main row.
		if doctypes:
			doc = {}
			for idx in xrange(start_idx, len(rows)):
				if (not doc) or main_doc_empty(rows[idx]):
					for dt in doctypes:
						d = {}
						for column_idx in column_idx_to_fieldname[dt]:
							try:
								fieldname = column_idx_to_fieldname[dt][column_idx]
								fieldtype = column_idx_to_fieldtype[dt][column_idx]
								d[fieldname] = rows[idx][column_idx]
								if fieldtype in ("Int", "Check"):
									d[fieldname] = cint(d[fieldname])
								elif fieldtype in ("Float", "Currency", "Percent"):
									d[fieldname] = flt(d[fieldname])
								elif fieldtype == "Date":
									d[fieldname] = parse_date(d[fieldname]) if d[fieldname] else None
							except IndexError:
								# short row: remaining columns are empty
								pass
						# scrub quotes from name and modified
						if d.get("name") and d["name"].startswith('"'):
							d["name"] = d["name"][1:-1]
						if sum([0 if not val else 1 for val in d.values()]):
							d['doctype'] = dt
							if dt == doctype:
								doc.update(d)
							else:
								if not overwrite:
									d['parent'] = doc["name"]
								d['parenttype'] = doctype
								d['parentfield'] = doctype_parentfield[dt]
								doc.setdefault(d['parentfield'], []).append(d)
				else:
					break
			return doc
		else:
			# old-style template: flat column list, no child tables
			doc = frappe._dict(zip(columns, rows[start_idx][1:]))
			doc['doctype'] = doctype
			return doc
	def main_doc_empty(row):
		# A row continues the previous parent if its first two data
		# columns are both empty.
		return not (row and ((len(row) > 1 and row[1]) or (len(row) > 2 and row[2])))
	users = frappe.db.sql_list("select name from tabUser")
	def prepare_for_insert(doc):
		# don't block data import if user is not set
		# migrating from another system
		if not doc.owner in users:
			doc.owner = frappe.session.user
		if not doc.modified_by in users:
			doc.modified_by = frappe.session.user
	# header
	if not rows:
		rows = read_csv_content_from_uploaded_file(ignore_encoding_errors)
	start_row = get_start_row()
	header = rows[:start_row]
	data = rows[start_row:]
	doctype = get_header_row(get_data_keys_definition().main_table)[1]
	columns = filter_empty_columns(get_header_row(get_data_keys_definition().columns)[1:])
	doctypes = []
	doctype_parentfield = {}
	column_idx_to_fieldname = {}
	column_idx_to_fieldtype = {}
	# only submittable doctypes can be submitted after import
	if submit_after_import and not cint(frappe.db.get_value("DocType",
		doctype, "is_submittable")):
		submit_after_import = False
	parenttype = get_header_row(get_data_keys_definition().parent_table)
	if len(parenttype) > 1:
		parenttype = parenttype[1]
	# check permissions
	if not frappe.permissions.can_import(parenttype or doctype):
		frappe.flags.mute_emails = False
		return {"messages": [_("Not allowed to Import") + ": " + _(doctype)], "error": True}
	# allow limit rows to be uploaded
	check_data_length()
	make_column_map()
	if overwrite==None:
		overwrite = params.get('overwrite')
	# delete child rows (if parenttype)
	parentfield = None
	if parenttype:
		parentfield = get_parent_field(doctype, parenttype)
		if overwrite:
			delete_child_rows(data, doctype)
	ret = []
	error = False
	total = len(data)
	for i, row in enumerate(data):
		# bypass empty rows
		if main_doc_empty(row):
			continue
		row_idx = i + start_row
		doc = None
		frappe.publish_realtime(message = {"progress": [i, total]})
		doc = get_doc(row_idx)
		if pre_process:
			pre_process(doc)
		try:
			frappe.local.message_log = []
			if parentfield:
				# importing child-table rows into an existing parent doc
				parent = frappe.get_doc(parenttype, doc["parent"])
				doc = parent.append(parentfield, doc)
				parent.save()
				ret.append('Inserted row for %s at #%s' % (getlink(parenttype,
					doc.parent), unicode(doc.idx)))
			else:
				if overwrite and doc["name"] and frappe.db.exists(doctype, doc["name"]):
					original = frappe.get_doc(doctype, doc["name"])
					original.update(doc)
					original.flags.ignore_links = ignore_links
					original.save()
					ret.append('Updated row (#%d) %s' % (row_idx + 1, getlink(original.doctype, original.name)))
					doc = original
				else:
					doc = frappe.get_doc(doc)
					prepare_for_insert(doc)
					doc.flags.ignore_links = ignore_links
					doc.insert()
					ret.append('Inserted row (#%d) %s' % (row_idx + 1, getlink(doc.doctype, doc.name)))
				if submit_after_import:
					doc.submit()
					ret.append('Submitted row (#%d) %s' % (row_idx + 1, getlink(doc.doctype, doc.name)))
		except Exception, e:
			# record the failure for this row and keep importing; the whole
			# transaction is rolled back at the end if anything failed
			error = True
			if doc:
				frappe.errprint(doc if isinstance(doc, dict) else doc.as_dict())
			err_msg = frappe.local.message_log and "\n\n".join(frappe.local.message_log) or cstr(e)
			ret.append('Error for row (#%d) %s : %s' % (row_idx + 1,
				len(row)>1 and row[1] or "", err_msg))
			frappe.errprint(frappe.get_traceback())
	if error:
		frappe.db.rollback()
	else:
		frappe.db.commit()
	frappe.flags.mute_emails = False
	return {"messages": ret, "error": error}
def get_parent_field(doctype, parenttype):
	"""Return the fieldname of the table field in ``parenttype`` whose
	options point at ``doctype``.

	Raises via msgprint + Exception when ``parenttype`` has no such child
	table; returns None when ``parenttype`` is falsy.
	"""
	parentfield = None
	# get parentfield
	if parenttype:
		for d in frappe.get_meta(parenttype).get_table_fields():
			if d.options==doctype:
				parentfield = d.fieldname
				break
		if not parentfield:
			# Bug fix: the format string used to be "{0} for {0} ({1})",
			# which repeated "parentfield" and never showed the doctype.
			frappe.msgprint(_("Did not find {0} for {1} ({2})").format("parentfield",
				parenttype, doctype))
			raise Exception
	return parentfield
def delete_child_rows(rows, doctype):
	"""delete child rows for all parents"""
	# Each data row carries its parent name in column 1; deduplicate and
	# wipe the existing child rows of every parent before re-import.
	parents = set(row[1] for row in rows)
	for parent in parents:
		if parent:
			frappe.db.sql("""delete from `tab{0}` where parent=%s""".format(doctype), parent)
|
reachalpineswift/frappe-bench
|
frappe/core/page/data_import_tool/importer.py
|
Python
|
mit
| 8,135
|
# coding: utf-8
class PropertyDict(dict):
    """Dict whose keys are also readable as attributes (``d.key``).

    Missing keys still raise KeyError on attribute access, matching the
    original behaviour.
    """
    def __getattr__(self, name):
        # Private/dunder names are never treated as dict keys, except the
        # Mongo-style '_id'.  The original delegated to the nonexistent
        # dict.__getattr__, which only worked because that failed lookup
        # itself raised AttributeError; raise it explicitly instead.
        if name.startswith("_") and not name == '_id':
            raise AttributeError(name)
        return self[name]
class Property(object):
    """Descriptor for a typed, defaulted attribute stored in the owner
    instance's ``_data`` dict.  ``name`` is assigned later (by the
    collection metaclass)."""
    def __init__(self, type=None, doc=None, default=None):
        # ``type`` doubles as a coercion callable applied on assignment.
        self.type = type
        self.default = default
        self.name = None
        self.__doc__ = doc
    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        stored = instance._data.get(self.name)
        return self.default if stored is None else stored
    def __set__(self, instance, value):
        needs_cast = self.type and not isinstance(value, self.type)
        if needs_cast:
            try:
                value = self.type(value)
            except ValueError:
                raise(ValueError("type of %s must be %s" % (self.name, self.type)))
        instance._data[self.name] = value
# Registry of collection classes, keyed by their __collection__ name.
__collections__ = {}
class CollectionMetaClass(type):
    """Metaclass that tells each Property descriptor which attribute it
    backs, and registers classes declaring ``__collection__``."""
    def __new__(cls, name, bases, attrs):
        # Wire up the descriptors before the class object exists.
        for attr_name, attr_value in attrs.items():
            is_prop = (hasattr(attr_value, "__class__")
                       and issubclass(attr_value.__class__, Property))
            if is_prop:
                attr_value.name = attr_name
        new_class = super(CollectionMetaClass, cls).__new__(cls, name, bases, attrs)
        if "__collection__" in attrs:
            global __collections__
            __collections__[attrs.get("__collection__")] = new_class
        return new_class
class Collection(object):
    """Base class for documents; Property descriptors read/write _data."""
    __metaclass__ = CollectionMetaClass
    def __init__(self, **kw):
        self._data = {}
        # Seed the instance from keyword arguments, silently skipping any
        # attribute the class refuses to accept.
        for attr_name in list(kw):
            value = kw.pop(attr_name)
            try:
                setattr(self, attr_name, value)
            except AttributeError:
                pass
|
victorpantoja/scraper
|
scraper/repository/mongodb/orm.py
|
Python
|
mit
| 2,030
|
""" Functions for estimating the quality of spike sorting results. These
functions estimate false positive and false negative fractions.
"""
from __future__ import division
import scipy as sp
from scipy.spatial.distance import cdist
import quantities as pq
import neo
from progress_indicator import ProgressIndicator
from . import SpykeException
from conversions import spikes_to_spike_train
def get_refperiod_violations(spike_trains, refperiod, progress=None):
    """ Return the refractory period violations in the given spike trains
    for the specified refractory period.

    :param dict spike_trains: Dictionary of lists of
        :class:`neo.core.SpikeTrain` objects.
    :param refperiod: The refractory period (time).
    :type refperiod: Quantity scalar
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * The total number of violations.
        * A dictionary (with the same indices as ``spike_trains``) of
          arrays with violation times (Quantity 1D with the same unit as
          ``refperiod``) for each spike train.
    :rtype: int, dict """
    refperiod_is_time = (type(refperiod) == pq.Quantity and
                         refperiod.simplified.dimensionality ==
                         pq.s.dimensionality)
    if not refperiod_is_time:
        raise ValueError('refperiod must be a time quantity!')
    if not progress:
        progress = ProgressIndicator()
    total_violations = 0
    violations = {}
    for unit, trains in spike_trains.iteritems():
        unit_violations = []
        violations[unit] = unit_violations
        for train in trains:
            # Sort a copy so inter-spike intervals are non-negative.
            sorted_times = train.copy()
            sorted_times.sort()
            intervals = sp.diff(sorted_times)
            too_close = sorted_times[intervals < refperiod]
            unit_violations.append(too_close.rescale(refperiod.units))
            total_violations += len(too_close)
            progress.step()
    return total_violations, violations
def calculate_refperiod_fp(num_spikes, refperiod, violations, total_time):
    """ Return the rate of false positives calculated from refractory period
    calculations for each unit. The equation used is described in
    (Hill et al. The Journal of Neuroscience. 2011).

    :param dict num_spikes: Dictionary of total number of spikes,
        indexed by unit.
    :param refperiod: The refractory period (time). If the spike sorting
        algorithm includes a censored period (a time after a spike during
        which no new spikes can be found), subtract it from the refractory
        period before passing it to this function.
    :type refperiod: Quantity scalar
    :param dict violations: Dictionary of total number of violations,
        indexed the same as num_spikes.
    :param total_time: The total time in which violations could have occured.
    :type total_time: Quantity scalar
    :returns: A dictionary of false positive rates indexed by unit.
        Note that values above 0.5 can not be directly interpreted as a
        false positive rate! These very high values can e.g. indicate
        that the generating processes are not independent.
    """
    if type(refperiod) != pq.Quantity or \
            refperiod.simplified.dimensionality != pq.s.dimensionality:
        raise ValueError('refperiod must be a time quantity!')
    fp = {}
    factor = total_time / (2 * refperiod)
    for u, n in num_spikes.iteritems():
        if n == 0:
            # No spikes -> no evidence of contamination.
            fp[u] = 0
            continue
        # zw is the right-hand side of the quadratic fp*(1-fp) = zw implied
        # by the closed-form solution below.
        zw = (violations[u] * factor / n ** 2).simplified
        if zw > 0.25:
            # No real solution to the quadratic.
            # NOTE(review): sp.sqrt on a negative *real* argument yields nan
            # (whose .imag is 0.0), not a complex root, so this branch
            # effectively returns 0.5 unless zw is complex -- confirm
            # whether sp.sqrt(complex(0.25 - zw)) was intended.
            fp[u] = 0.5 + sp.sqrt(0.25 - zw).imag
            continue
        fp[u] = 0.5 - sp.sqrt(0.25 - zw)
    return fp
def _multi_norm(x, mean):
""" Evaluate pdf of multivariate normal distribution with a mean
at rows of x with high precision.
"""
d = x.shape[1]
fac = (2 * sp.pi) ** (-d / 2.0)
y = cdist(x, sp.atleast_2d(mean), 'sqeuclidean') * -0.5
return fac * sp.exp(sp.longdouble(y))
def _fast_overlap_whitened(spike_arrays, means):
    """Estimate total and pairwise (false positive, false negative) rates
    for whitened clusters (identity covariance assumed), using unit-normal
    likelihoods around each cluster mean.

    ``spike_arrays`` maps unit -> (samples x spikes) array; ``means`` maps
    unit -> cluster mean.  Returns (totals, singles) dicts as described in
    overlap_fp_fn.
    """
    units = spike_arrays.keys()
    # Number of spikes per unit (columns of each array).
    spikes = {u: spike_arrays[u].shape[1] for u in spike_arrays.iterkeys()}
    prior = {}
    total_spikes = 0
    for u, mean in means.iteritems():
        total_spikes += spikes[u]
    if total_spikes < 1:
        return {u: (0.0, 0.0) for u in units}, {}
    # Arrays of unnormalized posteriors (likelihood times prior)
    # for all units
    posterior = {}
    false_positive = {}
    false_negative = {}
    for u in units:
        # True division (from __future__ import division at module top).
        prior[u] = spikes[u] / total_spikes
        false_positive[u] = 0
        false_negative[u] = 0
    # Calculate posteriors
    # Iterate over a copy because empty units are dropped from ``units``.
    for u1 in units[:]:
        if not spikes[u1]:
            units.remove(u1)
            continue
        posterior[u1] = {}
        for u2, mean in means.iteritems():
            llh = _multi_norm(spike_arrays[u1].T, mean)
            posterior[u1][u2] = llh * prior[u2]
    # Calculate pairwise false positives/negatives
    singles = {u: {} for u in units}
    for i, u1 in enumerate(units):
        u1 = units[i]
        for u2 in units[i + 1:]:
            # f1: u1 spikes attributed to u2; f2: u2 spikes attributed to u1.
            f1 = sp.sum(posterior[u1][u2] /
                        (posterior[u1][u1] + posterior[u1][u2]),
                        dtype=sp.double)
            f2 = sp.sum(posterior[u2][u1] /
                        (posterior[u2][u1] + posterior[u2][u2]),
                        dtype=sp.double)
            singles[u1][u2] = (f1 / spikes[u1] if spikes[u1] else 0,
                               f2 / spikes[u1] if spikes[u1] else 0)
            singles[u2][u1] = (f2 / spikes[u2] if spikes[u2] else 0,
                               f1 / spikes[u2] if spikes[u2] else 0)
    # Calculate complete false positives/negatives with extended bayes
    for u1 in units:
        numerator = posterior[u1][u1]
        normalizer = sum(posterior[u1][u2] for u2 in units)
        false_positive[u1] = sp.sum((normalizer - numerator) / normalizer)
        other_units = units[:]
        other_units.remove(u1)
        numerator = sp.vstack((posterior[u][u1] for u in other_units))
        normalizer = sp.vstack(sum(posterior[u][u2] for u2 in units) for u in other_units)
        false_negative[u1] = sp.sum(numerator / normalizer)
    # Prepare return values, convert sums to means
    totals = {}
    for u, fp in false_positive.iteritems():
        fn = false_negative[u]
        if not spikes[u]:
            totals[u] = (0, 0)
        else:
            num = spikes[u]
            totals[u] = (fp / num, fn / num)
    return totals, singles
def _pair_overlap(waves1, waves2, mean1, mean2, cov1, cov2):
    """ Calculate FP/FN estimates for two gaussian clusters

    Builds a 2-component Gaussian mixture from the given per-cluster
    means/covariances (weighted by spike counts) and returns
    (fp1, fn1, fp2, fn2) derived from component membership probabilities.

    NOTE(review): ``mixture.GMM`` and its ``covars_``/``init_params``
    interface belong to pre-0.20 scikit-learn; this will not run against
    modern sklearn without porting to GaussianMixture -- confirm the
    pinned sklearn version.
    """
    from sklearn import mixture
    means = sp.vstack([[mean1], [mean2]])
    covars = sp.vstack([[cov1], [cov2]])
    # Component weights proportional to cluster sizes (spikes are columns).
    weights = sp.array([waves1.shape[1], waves2.shape[1]], dtype=float)
    weights /= weights.sum()
    # Create mixture of two Gaussians from the existing estimates
    # (init_params='' prevents refitting over the provided parameters).
    mix = mixture.GMM(n_components=2, covariance_type='full', init_params='')
    mix.covars_ = covars
    mix.weights_ = weights
    mix.means_ = means
    # Probability that each cluster's spikes belong to the *other* component.
    posterior1 = mix.predict_proba(waves1.T)[:, 1]
    posterior2 = mix.predict_proba(waves2.T)[:, 0]
    return (posterior1.mean(), posterior2.sum() / len(posterior1),
            posterior2.mean(), posterior1.sum() / len(posterior2))
def _object_has_size(obj, size):
    """ Return whether *obj* (a neo.Spike or an ndarray) holds exactly
    ``size`` waveform samples. """
    data = obj.waveform if isinstance(obj, neo.Spike) else obj
    return data.size == size
def overlap_fp_fn(spikes, means=None, covariances=None):
    """ Return dicts of tuples (False positive rate, false negative rate)
    indexed by unit. This function needs :mod:`sklearn` if
    ``covariances`` is not set to ``'white'``.

    This function estimates the pairwise and total false positive and false
    negative rates for a number of waveform clusters. The results can be
    interpreted as follows: False positives are the fraction of spikes in a
    cluster that is estimated to belong to a different cluster (a specific
    cluster for pairwise results or any other cluster for total results).
    False negatives are the number spikes from other clusters that are
    estimated to belong to a given cluster (also expressed as fraction, this
    number can be larger than 1 in extreme cases).

    Details for the calculation can be found in
    (Hill et al. The Journal of Neuroscience. 2011).

    The calculation for total false positive and false negative rates does
    not follow Hill et al., who propose a simple addition of pairwise
    probabilities. Instead, the total error probabilities are estimated
    using all clusters at once.

    :param dict spikes: Dictionary, indexed by unit, of lists of
        spike waveforms as :class:`neo.core.Spike` objects or numpy arrays.
        If the waveforms have multiple channels, they will be flattened
        automatically. All waveforms need to have the same number of samples.
    :param dict means: Dictionary, indexed by unit, of lists of
        spike waveforms as :class:`neo.core.Spike` objects or numpy arrays.
        Means for units that are not in this dictionary will be estimated
        using the spikes. Note that if you pass ``'white'`` for
        ``covariances`` and you want to provide means, they have to be
        whitened in the same way as the spikes.
        Default: None, means will be estimated from data.
    :param covariances: Dictionary, indexed by unit, of lists of
        covariance matrices. Covariances for units that are not in this
        dictionary will be estimated using the spikes. It is useful to give
        a covariance matrix if few spikes are present - consider using the
        noise covariance. If you use prewhitened spikes (i.e. all clusters
        are normal distributed, so their covariance matrix is the identity),
        you can pass ``'white'`` here. The calculation will be much faster in
        this case and the sklearn package is not required.
        Default: None, covariances will estimated from data.
    :type covariances: dict or str
    :returns: Two values:

        * A dictionary (indexed by unit) of total
          (false positive rate, false negative rate) tuples.
        * A dictionary of dictionaries, both indexed by units,
          of pairwise (false positive rate, false negative rate) tuples.
    :rtype: dict, dict
    """
    # Python 2 semantics: keys() returns a mutable list here; it is
    # mutated below (units.remove) and indexed (units[0]).
    units = spikes.keys()
    total_spikes = 0

    for spks in spikes.itervalues():
        total_spikes += len(spks)
    if total_spikes < 1:
        # No spikes at all: trivially no errors for any unit.
        return {u: (0.0, 0.0) for u in units}, {}

    if means is None:
        means = {}
    white = False
    if covariances is None:
        covariances = {}
    elif covariances == 'white':
        # Prewhitened data: identity covariances, closed-form fast path.
        white = True
        covariances = {}

    # Convert Spike objects to arrays of shape (samples, num_spikes).
    dimensionality = None
    spike_arrays = {}
    for u, spks in spikes.iteritems():
        spikelist = []
        # Units with no spikes, or too few spikes to estimate a
        # covariance (and none supplied), are dropped entirely.
        if not spks or (len(spks) < 2 and u not in covariances):
            units.remove(u)
            continue
        for s in spks:
            if isinstance(s, neo.Spike):
                # Flatten multi-channel waveforms channel-major.
                spikelist.append(
                    sp.asarray(s.waveform.rescale(pq.uV)).T.flatten())
            else:
                spikelist.append(s)
        spike_arrays[u] = sp.array(spikelist).T
        if dimensionality is None:
            dimensionality = spike_arrays[u].shape[0]
        elif dimensionality != spike_arrays[u].shape[0]:
            # NOTE(review): message is missing a space between 'number'
            # and 'of' (implicit string concatenation) -- reads
            # "numberof samples".
            raise SpykeException('All spikes need to have the same number'
                                 'of samples!')

    if not units:
        return {}, {}
    if len(units) == 1:
        # A single cluster cannot be confused with anything else.
        return {units[0]: (0.0, 0.0)}, {}

    # Convert or calculate means and covariances
    shaped_means = {}
    covs = {}
    if white:
        cov = sp.eye(dimensionality)
        covariances = {u: cov for u in units}
    for u in units:
        # Use the supplied mean only if its size matches the data.
        if u in means and _object_has_size(means[u], dimensionality):
            mean = means[u]
            if isinstance(mean, neo.Spike):
                shaped_means[u] = sp.asarray(
                    mean.waveform.rescale(pq.uV)).T.flatten()
            else:
                shaped_means[u] = means[u].T.flatten()
        else:
            shaped_means[u] = spike_arrays[u].mean(axis=1)

    if white:
        # Identity covariances: use the closed-form whitened computation.
        return _fast_overlap_whitened(spike_arrays, shaped_means)

    for u in units:
        if u not in covariances:
            covs[u] = sp.cov(spike_arrays[u])
        else:
            covs[u] = covariances[u]

    # Calculate pairwise false positives/negatives
    singles = {u: {} for u in units}
    for i, u1 in enumerate(units):
        u1 = units[i]  # NOTE(review): redundant; enumerate already bound u1
        for u2 in units[i + 1:]:
            error_rates = _pair_overlap(
                spike_arrays[u1], spike_arrays[u2],
                shaped_means[u1], shaped_means[u2],
                covs[u1], covs[u2])
            # _pair_overlap returns (fp1, fn1, fp2, fn2).
            singles[u1][u2] = error_rates[0:2]
            singles[u2][u1] = error_rates[2:4]

    # Calculate complete false positives/negatives using a GMM with all
    # cluster parameters fixed to the estimates above.
    # NOTE(review): 'import sklearn' may not expose sklearn.mixture on all
    # versions -- 'import sklearn.mixture' would be safer; verify against
    # the sklearn version this targets (GMM only exists in sklearn < 0.20).
    import sklearn
    # NOTE(review): n_components=2 looks inconsistent with len(units)
    # components; the means_/covars_/weights_ assigned below override the
    # fitted parameters, which presumably makes this work -- confirm.
    mix = sklearn.mixture.GMM(n_components=2, covariance_type='full')
    mix_means = []
    mix_covars = []
    mix_weights = []
    for u in units:
        mix_means.append(shaped_means[u])
        mix_covars.append([covs[u]])
        # Weight each component by its number of spikes.
        mix_weights.append(spike_arrays[u].shape[1])
    mix.means_ = sp.vstack(mix_means)
    mix.covars_ = sp.vstack(mix_covars)
    mix_weights = sp.array(mix_weights, dtype=float)
    mix_weights /= mix_weights.sum()
    mix.weights_ = mix_weights

    # P(spikes of unit[i] in correct cluster)
    post_mean = sp.zeros(len(units))
    # sum(P(spikes of unit[i] in cluster[j])
    post_sum = sp.zeros((len(units), len(units)))
    for i, u in enumerate(units):
        posterior = mix.predict_proba(spike_arrays[u].T)
        post_mean[i] = posterior[:, i].mean()
        post_sum[i, :] = posterior.sum(axis=0)

    totals = {}
    for i, u in enumerate(units):
        # FP: probability mass of unit i's spikes assigned elsewhere.
        fp = 1.0 - post_mean[i]
        # FN: mass of all *other* units' spikes assigned to cluster i,
        # normalized by unit i's spike count (can exceed 1).
        ind = range(len(units))  # Python 2: range() returns a list
        ind.remove(i)
        fn = post_sum[ind, i].sum() / float(spike_arrays[u].shape[1])
        totals[u] = (fp, fn)
    return totals, singles
def variance_explained(spikes, means=None, noise=None):
    """ Returns the fraction of variance in each channel that is explained
    by the means.

    Values below 0 or above 1 for large data sizes indicate
    that some assumptions were incorrect (e.g. about channel noise) and
    the results should not be trusted.

    :param dict spikes: Dictionary, indexed by unit, of
        :class:`neo.core.SpikeTrain` objects (where the ``waveforms``
        member includes the spike waveforms) or lists of
        :class:`neo.core.Spike` objects.
    :param dict means: Dictionary, indexed by unit, of lists of
        spike waveforms as :class:`neo.core.Spike` objects or numpy arrays.
        Means for units that are not in this dictionary will be estimated
        using the spikes.
        Default: None - means will be estimated from given spikes.
    :type noise: Quantity 1D
    :param noise: The known noise levels (as variance) per channel of the
        original data. This should be estimated from the signal periods
        that do not contain spikes, otherwise the explained variance
        could be overestimated. If None, the estimate of explained variance
        is done without regard for noise.
        Default: None
    :return dict: A dictionary of arrays, both indexed by unit. If ``noise``
        is ``None``, the dictionary contains
        the fraction of explained variance per channel without taking noise
        into account. If ``noise`` is given, it contains the fraction of
        variance per channel explained by the means and given noise level
        together.
    """
    ret = {}
    if means is None:
        means = {}
    # Python 2 API (iteritems); one result array per unit.
    for u, spks in spikes.iteritems():
        train = spks
        if not isinstance(train, neo.SpikeTrain):
            # Lists of Spike objects are first merged into one SpikeTrain.
            train = spikes_to_spike_train(spks)
        # Use the supplied mean only when its sample count matches the
        # waveforms; otherwise compute the mean waveform from the data.
        if u in means and means[u].waveform.shape[0] == train.waveforms.shape[1]:
            spike = means[u]
        else:
            spike = neo.Spike(0)
            spike.waveform = sp.mean(train.waveforms, axis=0)
        # Total variance per channel (averaged over spikes) ...
        orig = sp.mean(sp.var(train.waveforms, axis=1), axis=0)
        # ... and residual variance after subtracting the mean waveform.
        waves = train.waveforms - spike.waveform
        new = sp.mean(sp.var(waves, axis=1), axis=0)
        if noise is not None:
            # Credit the known channel noise to the residual as well.
            ret[u] = sp.asarray(1 - (new - noise) / orig)
        else:
            ret[u] = sp.asarray(1 - new / orig)
    return ret
|
rproepp/spykeutils
|
spykeutils/sorting_quality_assesment.py
|
Python
|
bsd-3-clause
| 16,506
|
#!/usr/bin/env python3
# pylint: disable=C0302
"""
Test the Lambda handler.
"""
# pylint: disable=C0103,C0111,R0904
from base64 import b32encode
from http.server import BaseHTTPRequestHandler, HTTPServer
from json import dumps as json_dumps, loads as json_loads
from logging import getLogger
from os import environ, urandom
from threading import Thread
from unittest import TestCase
from botocore.exceptions import ClientError as BotoClientError
import boto3
from moto import mock_iam
import rolemaker_server as rolemaker
# Fixes for Moto's unimplemented detach_role_policy API.
# https://github.com/spulec/moto/pull/1052
from moto.core.exceptions import RESTError # pylint: disable=C0412
from moto.iam.exceptions import IAMNotFoundException
from moto.iam.models import IAMBackend, iam_backend, ManagedPolicy, Role
from moto.iam.responses import IamResponse
def policy_detach_from_role(self, role):
    """Monkey-patch replacement for moto's ManagedPolicy.detach_from_role
    (missing in the moto version targeted; see PR #1052 linked above)."""
    self.attachment_count -= 1
    del role.managed_policies[self.name]

# Install the patch on moto's ManagedPolicy model.
ManagedPolicy.detach_from_role = policy_detach_from_role
def role_delete_policy(self, policy_name):
    """Monkey-patch replacement for moto's Role.delete_policy: remove an
    inline policy, mapping a missing name to IAMNotFoundException."""
    try:
        del self.policies[policy_name]
    except KeyError:
        raise IAMNotFoundException(
            "The role policy with name {0} cannot be found.".format(policy_name))

# Install the patch on moto's Role model.
Role.delete_policy = role_delete_policy
class InvalidParameterError(RESTError):
    """Moto REST error representing an AWS InvalidParameterValue failure."""

    # HTTP status code returned with the error.
    code = 400

    def __init__(self, message):
        super(InvalidParameterError, self).__init__(
            "InvalidParameterValue", message)
def role_put_policy(self, policy_name, policy_json):
    """Monkey-patch replacement for moto's Role.put_policy that lets tests
    force a validation failure via the TRIGGER_INVALID_JSON marker."""
    if "TRIGGER_INVALID_JSON" in str(policy_json):
        raise InvalidParameterError("Policy contains TRIGGER_INVALID_JSON")
    self.policies[policy_name] = policy_json

# Install the patch on moto's Role model.
Role.put_policy = role_put_policy
def backend_detach_role_policy(self, policy_arn, role_name):
    """Monkey-patch for IAMBackend.detach_role_policy: look up the managed
    policy by ARN and detach it, mapping unknown ARNs (KeyError) to
    IAMNotFoundException."""
    arns = dict((p.arn, p) for p in self.managed_policies.values())
    try:
        policy = arns[policy_arn]
        policy.detach_from_role(self.get_role(role_name))
    except KeyError:
        raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn))

# Install the patch on moto's IAM backend.
IAMBackend.detach_role_policy = backend_detach_role_policy
def backend_delete_role_policy(self, role_name, policy_name):
    """Monkey-patch for IAMBackend.delete_role_policy: delegate to the
    patched Role.delete_policy above."""
    role = self.get_role(role_name)
    role.delete_policy(policy_name)

# Install the patch on moto's IAM backend.
IAMBackend.delete_role_policy = backend_delete_role_policy
# XML body returned by the mocked DetachRolePolicy API call.
DETACH_ROLE_POLICY_TEMPLATE = """\
<DetachRolePolicyResponse>
   <ResponseMetadata>
      <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
   </ResponseMetadata>
</DetachRolePolicyResponse>"""

def response_detach_role_policy(self):
    """Monkey-patch for IamResponse.detach_role_policy: parse the request
    parameters, invoke the patched backend, and render the XML response."""
    policy_arn = self._get_param('PolicyArn')  # pylint: disable=W0212
    role_name = self._get_param('RoleName')    # pylint: disable=W0212
    iam_backend.detach_role_policy(policy_arn, role_name)
    template = self.response_template(DETACH_ROLE_POLICY_TEMPLATE)
    return template.render()

# Install the patch on moto's IAM response handler.
IamResponse.detach_role_policy = response_detach_role_policy
# XML body returned by the mocked DeleteRolePolicy API call.
DELETE_ROLE_POLICY_TEMPLATE = """\
<DeleteRolePolicyResponse>
   <ResponseMetadata>
      <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
   </ResponseMetadata>
</DeleteRolePolicyResponse>"""

def response_delete_role_policy(self):
    """Monkey-patch for IamResponse.delete_role_policy: parse the request
    parameters, invoke the patched backend, and render the XML response."""
    policy_name = self._get_param('PolicyName')  # pylint: disable=W0212
    role_name = self._get_param('RoleName')      # pylint: disable=W0212
    iam_backend.delete_role_policy(role_name, policy_name)
    template = self.response_template(DELETE_ROLE_POLICY_TEMPLATE)
    return template.render()

# Install the patch on moto's IAM response handler.
IamResponse.delete_role_policy = response_delete_role_policy
### End Moto fixup
def randstr(length=10):
    """
    Return a random token built from *length* random bytes, Base32-encoded
    with any trailing ``=`` padding removed.
    """
    raw = urandom(length)
    token = b32encode(raw).decode("ascii")
    return token.rstrip("=")
class ResponseHandler(BaseHTTPRequestHandler):
    """
    Handles S3 POSTs that the Lambda handler sends its results to.
    """
    log = getLogger("http")
    # Class-level list: bodies of every PUT received; tests reset and pop it.
    responses = []

    def do_PUT(self):
        # Capture the request body and pretend to be S3 with an empty 200.
        content_length = self.headers.get("Content-Length")
        if content_length is not None:
            content_length = int(content_length)
        # NOTE(review): if Content-Length is absent, read(None) reads to
        # EOF, which can block on a keep-alive connection -- confirm the
        # Lambda handler always sends the header.
        data = self.rfile.read(content_length)
        self.responses.append(data)
        self.send_response(200, "")
        self.send_header("Content-Length", "0")
        self.send_header("Server", "AmazonS3")
        self.end_headers()
        return

    def log_message(self, format, *args):  # pylint: disable=W0622
        """
        Log messages to the regular logging facility; BaseHTTPRequestHandler
        forcibly prints them to stderr.
        """
        self.log.info(format, *args)
# Wide-open policy used as the stand-in "mandatory" managed policy in tests.
OPEN_MANDATORY_POLICY = {
    "Version": "2012-10-17",
    "Statement": {
        "Effect": "Allow",
        "Action": "*",
        "Principal": "*",
    }
}

# Power-user policy: allow everything except IAM writes; IAM reads allowed.
POWER_USER_POLICY = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "NotAction": "iam:*",
            "Resource": "*"
        },
        {
            "Effect": "Allow",
            "Action": ["iam:Get*", "iam:List*"],
            "Resource": "*"
        }
    ]
}

# Read-only S3 object access policy.
S3_POLICY = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetObject",
            "Resource": "*"
        }
    ]
}

# Trust policy letting EC2 assume the role.
BASIC_ASSUME_ROLE_POLICY = {
    "Version": "2012-10-17",
    "Statement": {
        "Effect": "Allow",
        "Action": "sts:AssumeRole",
        "Principal": {"Service": "ec2.amazonaws.com"}
    }
}

# Trust policy letting Lambda assume the role.
LAMBDA_ASSUME_ROLE_POLICY = {
    "Version": "2012-10-17",
    "Statement": {
        "Effect": "Allow",
        "Action": "sts:AssumeRole",
        "Principal": {"Service": "lambda.amazonaws.com"}
    }
}
@mock_iam
class TestCustomResourceHandler(TestCase):
    """
    Test CloudFormation Custom::RestrictedRole resource handling.
    """
    # ARNs created fresh per test in setUp().
    mandatory_arn = ""
    power_arn = ""
    s3_arn = ""

    @classmethod
    def setUpClass(cls):
        # Local HTTP server standing in for the S3 pre-signed ResponseURL
        # that CloudFormation custom resources report back to.
        cls.server = HTTPServer(("127.0.0.1", 0), ResponseHandler)
        cls.thread = Thread(target=cls.server.serve_forever)
        cls.thread.start()

    @classmethod
    def tearDownClass(cls):
        cls.server.shutdown()
        cls.thread.join()
        return

    def setUp(self):
        """Create the mandatory/power/S3 managed policies in mock IAM and
        reset the captured HTTP responses."""
        self.iam = boto3.client("iam")
        result = self.iam.create_policy(
            PolicyName="Mandatory",
            PolicyDocument=json_dumps(OPEN_MANDATORY_POLICY))
        # The handler reads the mandatory policy ARN from the environment.
        environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn = \
            result["Policy"]["Arn"]
        result = self.iam.create_policy(
            PolicyName="Power",
            PolicyDocument=json_dumps(POWER_USER_POLICY))
        self.power_arn = result["Policy"]["Arn"]
        result = self.iam.create_policy(
            PolicyName="S3",
            PolicyDocument=json_dumps(S3_POLICY))
        self.s3_arn = result["Policy"]["Arn"]
        ResponseHandler.responses = []
        return

    def invoke(self, ResourceType, RequestType="Create",
               LogicalResourceId="LogicalResourceId", **kw):
        """Build a CloudFormation custom-resource event, run the Lambda
        handler, and return the decoded response it PUT to our server."""
        sockname = self.server.socket.getsockname()
        event = {
            "StackId": "arn:aws:cloudformation:us-west-2:12345678:stack/stack-1234",
            "RequestId": "req-1234",
            "LogicalResourceId": LogicalResourceId,
            "RequestType": RequestType,
            "ResourceType": ResourceType,
            "ResponseURL": "http://%s:%s/" % (sockname[0], sockname[1])
        }
        # These two are top-level event keys, not resource properties.
        if "PhysicalResourceId" in kw:
            event["PhysicalResourceId"] = kw.pop("PhysicalResourceId")
        if "OldResourceProperties" in kw:
            event["OldResourceProperties"] = kw.pop("OldResourceProperties")
        event["ResourceProperties"] = kw
        rolemaker.lambda_handler(event, None)
        return json_loads(ResponseHandler.responses.pop())

    def test_unknown_type(self):
        """An unsupported resource type must produce a FAILED response."""
        result = self.invoke(ResourceType="Custom::Unknown")
        self.assertEqual(result["Status"], "FAILED")
        self.assertEqual(
            result["Reason"],
            "ClientError: An error occurred (InvalidParameterValue) when "
            "calling the Unknown operation: Cannot handle CloudFormation "
            "event Create Custom::Unknown")

    def test_basic_create_delete(self):
        """Create a restricted role, verify it, then delete it."""
        role_name = "test-bc-%s" % randstr()
        self.invoke(
            ResourceType="Custom::RestrictedRole",
            RoleName=role_name,
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY)
        print(self.iam.list_roles())
        role = self.iam.get_role(RoleName=role_name)
        self.assertEqual(role["Role"]["RoleName"], role_name)
        arp = role["Role"]["AssumeRolePolicyDocument"]
        self.assertEqual(BASIC_ASSUME_ROLE_POLICY, arp)
        self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Delete",
            RoleName=role_name,
            PhysicalResourceId=role_name)
        with self.assertRaises(BotoClientError):
            self.iam.get_role(RoleName=role_name)

    def test_policy_updates(self):
        """Walk a role through a sequence of Update events, including two
        failing updates, and verify attached/inline policies after each."""
        create_props = {
            "AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.power_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest",
                    "PolicyDocument": OPEN_MANDATORY_POLICY
                }
            ]
        }
        # Swap managed policy and rename one inline policy.
        update1_props = {
            "AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.s3_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest2",
                    "PolicyDocument": OPEN_MANDATORY_POLICY
                }
            ]
        }
        # Change one inline policy's document only.
        update2_props = {
            "AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.s3_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest2",
                    "PolicyDocument": S3_POLICY
                }
            ]
        }
        # Invalid: unexpected key inside an inline policy entry.
        update3_props = {
            "AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.s3_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest2",
                    "PolicyDocument": S3_POLICY,
                    "BadPolicy": "yes"
                }
            ]
        }
        # Invalid: document trips the patched moto put_policy validator.
        update4_props = {
            "AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [],
            "Policies": [
                {
                    "PolicyName": "jsontest2",
                    "PolicyDocument": {
                        "TRIGGER_INVALID_JSON": "Yes",
                    }
                }
            ]
        }

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", **create_props)
        role_name = response["PhysicalResourceId"]
        self.iam.get_role(RoleName=role_name)
        attached = self.iam.list_attached_role_policies(RoleName=role_name)[
            "AttachedPolicies"]
        self.assertEqual(len(attached), 2)
        policy_arns = set([pol["PolicyArn"] for pol in attached])
        # The mandatory policy is always attached alongside the requested one.
        self.assertEqual(policy_arns, {self.mandatory_arn, self.power_arn})
        inline = set(
            self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
        self.assertEqual(inline, {"strtest", "jsontest"})

        response = self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Update",
            PhysicalResourceId=role_name,
            OldResourceProperties=create_props,
            **update1_props)
        self.assertEqual("SUCCESS", response["Status"])
        self.iam.get_role(RoleName=role_name)
        attached = self.iam.list_attached_role_policies(RoleName=role_name)[
            "AttachedPolicies"]
        self.assertEqual(len(attached), 2)
        policy_arns = set([pol["PolicyArn"] for pol in attached])
        self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
        inline = set(
            self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
        self.assertEqual(inline, {"strtest", "jsontest2"})

        response = self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Update",
            PhysicalResourceId=role_name,
            OldResourceProperties=update1_props,
            **update2_props)
        self.assertEqual("SUCCESS", response["Status"])
        self.iam.get_role(RoleName=role_name)
        attached = self.iam.list_attached_role_policies(RoleName=role_name)[
            "AttachedPolicies"]
        self.assertEqual(len(attached), 2)
        policy_arns = set([pol["PolicyArn"] for pol in attached])
        self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
        inline = set(
            self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
        self.assertEqual(inline, {"strtest", "jsontest2"})

        # Rollback due to invalid parameter
        response = self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Update",
            PhysicalResourceId=role_name,
            OldResourceProperties=update2_props,
            **update3_props)
        self.assertEqual("FAILED", response["Status"])
        # State must be unchanged after the failed update.
        self.iam.get_role(RoleName=role_name)
        attached = self.iam.list_attached_role_policies(RoleName=role_name)[
            "AttachedPolicies"]
        self.assertEqual(len(attached), 2)
        policy_arns = set([pol["PolicyArn"] for pol in attached])
        self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
        inline = set(
            self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
        self.assertEqual(inline, {"strtest", "jsontest2"})

        response = self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Update",
            PhysicalResourceId=role_name,
            OldResourceProperties=update2_props,
            **update4_props)
        self.assertEqual("FAILED", response["Status"])
        self.assertIn(
            "Policy contains TRIGGER_INVALID_JSON", response["Reason"])
        # State must again be unchanged.
        self.iam.get_role(RoleName=role_name)
        attached = self.iam.list_attached_role_policies(RoleName=role_name)[
            "AttachedPolicies"]
        self.assertEqual(len(attached), 2)
        policy_arns = set([pol["PolicyArn"] for pol in attached])
        self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
        inline = set(
            self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
        self.assertEqual(inline, {"strtest", "jsontest2"})

    def test_name_change_updates(self):
        """Renaming a role triggers replacement; a failed replacement must
        not leave the new role behind."""
        create_props = {
            "RoleName": "role1",
            "AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.power_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest",
                    "PolicyDocument": OPEN_MANDATORY_POLICY
                }
            ]
        }
        good_update_props = {
            "RoleName": "role2",
            "AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.power_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest",
                    "PolicyDocument": OPEN_MANDATORY_POLICY
                }
            ]
        }
        # Invalid: ManagedPolicyArns entry is not a string.
        bad_update_props = {
            "RoleName": "role3",
            "AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [12345],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest",
                    "PolicyDocument": OPEN_MANDATORY_POLICY
                }
            ]
        }

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", **create_props)
        role_name = response["PhysicalResourceId"]
        self.iam.get_role(RoleName=role_name)
        self.assertEqual("SUCCESS", response["Status"])

        response = self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Update",
            PhysicalResourceId=role_name,
            OldResourceProperties=create_props,
            **good_update_props)
        self.assertEqual("SUCCESS", response["Status"])
        role_name = response["PhysicalResourceId"]
        self.iam.get_role(RoleName=role_name)

        response = self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Update",
            PhysicalResourceId=role_name,
            OldResourceProperties=good_update_props,
            **bad_update_props)
        self.assertEqual("FAILED", response["Status"])
        # Old role survives; the failed replacement role must not exist.
        self.iam.get_role(RoleName=role_name)
        with self.assertRaises(BotoClientError):
            self.iam.get_role(RoleName="role3")

    def test_no_update_path(self):
        """Changing Path on update is rejected while keeping the role."""
        role_name = "test-nup-%s" % randstr()
        create_props = {
            "RoleName": role_name,
            "Path": "/",
            "AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.power_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest",
                    "PolicyDocument": OPEN_MANDATORY_POLICY
                }
            ]
        }
        update_props = {
            "RoleName": role_name,
            "Path": "/foo/",
            "AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
            "ManagedPolicyArns": [self.power_arn],
            "Policies": [
                {
                    "PolicyName": "strtest",
                    "PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
                },
                {
                    "PolicyName": "jsontest",
                    "PolicyDocument": OPEN_MANDATORY_POLICY
                }
            ]
        }

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", **create_props)
        role_name = response["PhysicalResourceId"]
        self.iam.get_role(RoleName=role_name)
        self.assertEqual("SUCCESS", response["Status"])

        response = self.invoke(
            ResourceType="Custom::RestrictedRole",
            RequestType="Update",
            PhysicalResourceId=role_name,
            OldResourceProperties=create_props,
            **update_props)
        self.assertEqual("FAILED", response["Status"])
        self.assertIn("Cannot update the path to an existing role",
                      response["Reason"])
        role_name = response["PhysicalResourceId"]
        self.iam.get_role(RoleName=role_name)

    def test_create_bad_inline_policy(self):
        """Each malformed inline-policy shape yields a specific FAILED
        reason."""
        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1",
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
            Policies=[{"PolicyDocument": {}}])
        self.assertEqual("FAILED", response["Status"])
        self.assertIn("Inline policy missing PolicyName", response["Reason"])

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1",
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
            Policies=[{"PolicyName": "foo"}])
        self.assertEqual("FAILED", response["Status"])
        self.assertIn("Inline policy missing PolicyDocument",
                      response["Reason"])

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1",
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
            Policies=[{"PolicyName": 1, "PolicyDocument": {}}])
        self.assertEqual("FAILED", response["Status"])
        self.assertIn(
            "Invalid type for parameter PolicyName, value: 1, type "
            "<class 'int'>, valid types: <class 'str'>",
            response["Reason"])

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1",
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
            Policies=[{"PolicyName": "", "PolicyDocument": {}}])
        self.assertEqual("FAILED", response["Status"])
        self.assertIn("Invalid length for parameter PolicyName, value: 0",
                      response["Reason"])

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1",
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
            Policies=[{"PolicyName": "foo", "PolicyDocument": {}, "bar": 0}])
        self.assertEqual("FAILED", response["Status"])
        self.assertIn("Invalid inline policy parameter(s): bar",
                      response["Reason"])

        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1",
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
            Policies=[{"PolicyName": "foo", "PolicyDocument": 1}])
        self.assertEqual("FAILED", response["Status"])
        self.assertIn(
            "Invalid type for parameter PolicyDocument, value: 1, type "
            "<class 'int'>, valid types: (<class 'str'>, <class 'dict'>)",
            response["Reason"])

    def test_create_missing_assume_role(self):
        """Creation without a trust policy must fail."""
        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1")
        self.assertEqual("FAILED", response["Status"])
        self.assertIn("AssumeRolePolicyDocument is missing", response["Reason"])

    def test_create_unknown_props(self):
        """Unrecognized resource properties must be rejected by name."""
        response = self.invoke(
            ResourceType="Custom::RestrictedRole", RoleName="role1",
            AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
            Invalid=True)
        self.assertEqual("FAILED", response["Status"])
        self.assertIn("Unknown properties: Invalid", response["Reason"])
@mock_iam
class TestDirectInvoke(TestCase):
"""
Test direct Lambda invocation handling.
"""
mandatory_arn = ""
power_arn = ""
    def setUp(self):
        """Create the mandatory and power managed policies in mock IAM and
        reset the captured HTTP responses."""
        self.iam = boto3.client("iam")
        result = self.iam.create_policy(
            PolicyName="Mandatory",
            PolicyDocument=json_dumps(OPEN_MANDATORY_POLICY))
        # The handler reads the mandatory policy ARN from the environment.
        environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn = \
            result["Policy"]["Arn"]
        result = self.iam.create_policy(
            PolicyName="Power",
            PolicyDocument=json_dumps(POWER_USER_POLICY))
        self.power_arn = result["Policy"]["Arn"]
        ResponseHandler.responses = []
    def invoke(self, **kw):  # pylint: disable=R0201
        """Invoke the Lambda handler directly with the given event keys
        and return its result."""
        return rolemaker.lambda_handler(kw, None)
    def test_basic_workflows(self):
        """Exercise the full direct-invoke lifecycle: create, attach/put
        policies, detach/delete policies, update trust policy, delete."""
        role_name = "test-bw-%s" % randstr()
        result = self.invoke(
            Action="CreateRestrictedRole", RoleName=role_name,
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        self.assertNotIn("Error", result)
        self.assertIn("Role", result)
        role = self.iam.get_role(RoleName=role_name)
        self.assertEqual(role["Role"]["RoleName"], role_name)
        arp = role["Role"]["AssumeRolePolicyDocument"]
        self.assertEqual(BASIC_ASSUME_ROLE_POLICY, arp)

        result = self.invoke(
            Action="AttachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn=self.power_arn)
        self.assertNotIn("Error", result)
        result = self.invoke(
            Action="PutRestrictedRolePolicy", RoleName=role_name,
            PolicyName="Assume",
            PolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        self.assertNotIn("Error", result)

        # Mandatory policy is attached implicitly at creation time.
        result = self.iam.list_attached_role_policies(RoleName=role_name)
        policy_arns = set([
            policy["PolicyArn"] for policy in result["AttachedPolicies"]])
        self.assertEqual(policy_arns, {self.mandatory_arn, self.power_arn})
        result = self.iam.list_role_policies(RoleName=role_name)
        self.assertEqual(result["PolicyNames"], ["Assume"])
        result = self.iam.get_role_policy(
            RoleName=role_name, PolicyName="Assume")
        self.assertEqual(result["PolicyDocument"], BASIC_ASSUME_ROLE_POLICY)

        result = self.invoke(
            Action="DetachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn=self.power_arn)
        self.assertNotIn("Error", result)
        result = self.invoke(
            Action="DeleteRestrictedRolePolicy", RoleName=role_name,
            PolicyName="Assume")
        result = self.iam.list_attached_role_policies(RoleName=role_name)
        policy_arns = set([
            policy["PolicyArn"] for policy in result["AttachedPolicies"]])
        self.assertEqual(
            policy_arns, {environ["MANDATORY_ROLE_POLICY_ARN"]})
        result = self.iam.list_role_policies(RoleName=role_name)
        self.assertEqual(result["PolicyNames"], [])

        result = self.invoke(
            Action="UpdateAssumeRestrictedRolePolicy", RoleName=role_name,
            PolicyDocument=json_dumps(LAMBDA_ASSUME_ROLE_POLICY))
        self.assertNotIn("Error", result)

        # Moto doesn't implement this yet.
        if False:  # pylint: disable=W0125
            result = self.invoke(
                Action="UpdateRestrictedRoleDescription", RoleName=role_name,
                Description="New Description")
            self.assertNotIn("Error", result)
            self.assertEqual(
                "New Description",
                self.iam.get_role(RoleName=role_name)["Role"]["Description"])

        result = self.invoke(
            Action="DeleteRestrictedRole", RoleName=role_name)
        self.assertNotIn("Error", result)
        with self.assertRaises(BotoClientError):
            role = self.iam.get_role(RoleName=role_name)
    def test_attempt_modify_nonrestricted(self):
        """Every restricted-role action must refuse to touch a role that
        was not created through the handler."""
        role_name = "test-amn-%s" % randstr()

        def check_result(result):
            # Shared assertion: the action failed with the expected error.
            self.assertIn("Error", result)
            self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
            self.assertIn(
                "Role %s is not a restricted role" % role_name,
                result["Error"]["Message"])

        # Create the role directly in IAM, bypassing the handler.
        self.iam.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        result = self.invoke(
            Action="AttachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn=self.power_arn)
        check_result(result)
        result = self.invoke(
            Action="DetachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn=self.power_arn)
        check_result(result)
        result = self.invoke(
            Action="PutRestrictedRolePolicy", RoleName=role_name,
            PolicyName="foo", PolicyDocument="{}")
        check_result(result)
        result = self.invoke(
            Action="DeleteRestrictedRolePolicy", RoleName=role_name,
            PolicyName="foo")
        check_result(result)
        result = self.invoke(
            Action="UpdateAssumeRestrictedRolePolicy", RoleName=role_name,
            PolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        check_result(result)
        result = self.invoke(
            Action="UpdateRestrictedRoleDescription", RoleName=role_name,
            Description="Hello world")
        check_result(result)
    def test_attempt_detach_mandatory(self):
        """The mandatory managed policy can never be detached."""
        role_name = "test-dm-%s" % randstr()
        self.invoke(
            Action="CreateRestrictedRole", RoleName=role_name,
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        result = self.invoke(
            Action="DetachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn=environ["MANDATORY_ROLE_POLICY_ARN"])
        self.assertIn("Error", result)
        self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
        self.assertIn("Cannot detach the mandatory policy.",
                      result["Error"]["Message"])
    def test_empty_rolename(self):
        """A missing or empty RoleName fails identically for every action."""
        def check_result(result):
            # Shared assertion for both the omitted and empty-string cases.
            self.assertIn("Error", result)
            self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
            self.assertIn(
                "Invalid length for parameter RoleName, value: 0",
                result["Error"]["Message"])

        result = self.invoke(
            Action="CreateRestrictedRole",
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        check_result(result)
        result = self.invoke(
            Action="CreateRestrictedRole",
            RoleName="",
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        check_result(result)
        result = self.invoke(Action="DeleteRestrictedRole")
        check_result(result)
        result = self.invoke(Action="DeleteRestrictedRole", RoleName="")
        check_result(result)
        result = self.invoke(
            Action="AttachRestrictedRolePolicy",
            PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
        check_result(result)
        result = self.invoke(
            Action="AttachRestrictedRolePolicy",
            RoleName="",
            PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
        check_result(result)
        result = self.invoke(
            Action="DetachRestrictedRolePolicy",
            PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
        check_result(result)
        result = self.invoke(
            Action="DetachRestrictedRolePolicy",
            RoleName="",
            PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
        check_result(result)
        result = self.invoke(
            Action="UpdateRestrictedRoleDescription",
            Description="This is a test description")
        check_result(result)
        result = self.invoke(
            Action="UpdateRestrictedRoleDescription",
            RoleName="",
            Description="This is a test description")
        check_result(result)
        result = self.invoke(
            Action="UpdateAssumeRestrictedRolePolicy",
            PolicyDocument="{}")
        check_result(result)
        result = self.invoke(
            Action="UpdateAssumeRestrictedRolePolicy",
            RoleName="",
            PolicyDocument="{}")
        check_result(result)
        result = self.invoke(
            Action="PutRestrictedRolePolicy",
            PolicyName="Foo",
            PolicyDocument="{}")
        check_result(result)
        result = self.invoke(
            Action="PutRestrictedRolePolicy",
            RoleName="",
            PolicyName="Foo",
            PolicyDocument="{}")
        check_result(result)
        result = self.invoke(
            Action="DeleteRestrictedRolePolicy",
            PolicyName="Foo")
        check_result(result)
        result = self.invoke(
            Action="DeleteRestrictedRolePolicy",
            RoleName="",
            PolicyName="Foo")
        check_result(result)
    def test_delete_nonexistent_role(self):
        """Deleting a role that does not exist reports NoSuchEntity."""
        role_name = "test-dnr-%s" % randstr()
        result = self.invoke(
            Action="DeleteRestrictedRole", RoleName=role_name)
        self.assertIn("Error", result)
        self.assertEqual(result["Error"]["Code"], "NoSuchEntity")
        self.assertIn(
            "Role %s not found" % role_name, result["Error"]["Message"])
    def test_delete_nonexistent_attached_policy(self):
        """Detaching a policy ARN that was never attached reports
        NoSuchEntity."""
        role_name = "test-dnap-%s" % randstr()
        self.invoke(
            Action="CreateRestrictedRole", RoleName=role_name,
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        result = self.invoke(
            Action="DetachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn="arn:aws:iam:::policy/nonexistent")
        self.assertIn("Error", result)
        self.assertEqual(result["Error"]["Code"], "NoSuchEntity")
        self.assertIn(
            "Policy arn:aws:iam:::policy/nonexistent was not found",
            result["Error"]["Message"])
    def test_attempt_delete_role_with_policies(self):
        """A role cannot be deleted while managed or inline policies remain."""
        role_name = "test-drwp-%s" % randstr()
        self.invoke(
            Action="CreateRestrictedRole", RoleName=role_name,
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        # An attached managed policy must block deletion.
        result = self.invoke(
            Action="AttachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn=self.power_arn)
        self.assertNotIn("Error", result)
        result = self.invoke(
            Action="DeleteRestrictedRole", RoleName=role_name)
        self.assertIn("Error", result)
        self.assertEqual(result["Error"]["Code"], "DeleteConflict")
        self.assertIn("Cannot delete entity, must detach all policies first.",
                      result["Error"]["Message"])
        result = self.invoke(
            Action="DetachRestrictedRolePolicy", RoleName=role_name,
            PolicyArn=self.power_arn)
        self.assertNotIn("Error", result)
        # An inline policy must also block deletion.
        result = self.invoke(
            Action="PutRestrictedRolePolicy", RoleName=role_name,
            PolicyName="inline1",
            PolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        self.assertNotIn("Error", result)
        result = self.invoke(
            Action="DeleteRestrictedRole", RoleName=role_name)
        self.assertIn("Error", result)
        self.assertEqual(result["Error"]["Code"], "DeleteConflict")
        self.assertIn("Cannot delete entity, must delete policies first.",
                      result["Error"]["Message"])
        result = self.invoke(
            Action="DeleteRestrictedRolePolicy", RoleName=role_name,
            PolicyName="inline1")
        self.assertNotIn("Error", result)
        # With everything detached and deleted, deletion succeeds.
        result = self.invoke(
            Action="DeleteRestrictedRole", RoleName=role_name)
        self.assertNotIn("Error", result)
    def test_create_bad_parameters(self):
        """CreateRestrictedRole rejects wrongly typed/sized parameters."""
        role_name = "test-cbp-%s" % randstr()
        def check_result(result, message):
            # Every bad parameter must surface as InvalidParameterValue.
            self.assertIn("Error", result)
            self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
            self.assertIn(message, result["Error"]["Message"])
        check_result(
            self.invoke(
                Action="CreateRestrictedRole", RoleName=1234,
                AssumeRolePolicyDocument="{}", Path="/", Description="1"),
            "Invalid type for parameter RoleName, value: 1")
        check_result(
            self.invoke(
                Action="CreateRestrictedRole", RoleName=role_name,
                AssumeRolePolicyDocument="", Path="/", Description="1"),
            "Invalid length for parameter AssumeRolePolicyDocument, value: 0")
        check_result(
            self.invoke(
                Action="CreateRestrictedRole", RoleName=role_name,
                AssumeRolePolicyDocument=1, Path="/", Description="1"),
            "Invalid type for parameter AssumeRolePolicyDocument, value: 1")
        check_result(
            self.invoke(
                Action="CreateRestrictedRole", RoleName=role_name,
                AssumeRolePolicyDocument="{}", Path=1, Description="1"),
            "Invalid type for parameter Path, value: 1")
        check_result(
            self.invoke(
                Action="CreateRestrictedRole", RoleName=role_name,
                AssumeRolePolicyDocument="{}", Path="/", Description=1),
            "Invalid type for parameter Description, value: 1")
    def test_attach_detach_bad_parameters(self):
        """Attach/DetachRestrictedRolePolicy reject bad RoleName/PolicyArn."""
        role_name = "test-adbp-%s" % randstr()
        def check_result(result, message):
            # All parameter validation failures report InvalidParameterValue.
            self.assertIn("Error", result)
            self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
            self.assertIn(message, result["Error"]["Message"])
        self.invoke(
            Action="CreateRestrictedRole",
            RoleName=role_name,
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        check_result(
            self.invoke(
                Action="AttachRestrictedRolePolicy", RoleName="",
                PolicyArn=self.power_arn),
            "Invalid length for parameter RoleName, value: 0")
        check_result(
            self.invoke(
                Action="AttachRestrictedRolePolicy", RoleName=1,
                PolicyArn=self.power_arn),
            "Invalid type for parameter RoleName, value: 1")
        check_result(
            self.invoke(
                Action="AttachRestrictedRolePolicy", RoleName=role_name,
                PolicyArn=""),
            "Invalid length for parameter PolicyArn, value: 0")
        check_result(
            self.invoke(
                Action="AttachRestrictedRolePolicy", RoleName=role_name,
                PolicyArn=1),
            "Invalid type for parameter PolicyArn, value: 1")
        check_result(
            self.invoke(
                Action="DetachRestrictedRolePolicy", RoleName="",
                PolicyArn=self.power_arn),
            "Invalid length for parameter RoleName, value: 0")
        check_result(
            self.invoke(
                Action="DetachRestrictedRolePolicy", RoleName=1,
                PolicyArn=self.power_arn),
            "Invalid type for parameter RoleName, value: 1")
        check_result(
            self.invoke(
                Action="DetachRestrictedRolePolicy", RoleName=role_name,
                PolicyArn=""),
            "Invalid length for parameter PolicyArn, value: 0")
        check_result(
            self.invoke(
                Action="DetachRestrictedRolePolicy", RoleName=role_name,
                PolicyArn=1),
            "Invalid type for parameter PolicyArn, value: 1")
    def test_put_delete_bad_parameters(self):
        """Put/DeleteRestrictedRolePolicy reject bad names and documents."""
        role_name = "test-pdbp-%s" % randstr()
        def check_result(result, message):
            # All parameter validation failures report InvalidParameterValue.
            self.assertIn("Error", result)
            self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
            self.assertIn(message, result["Error"]["Message"])
        self.invoke(
            Action="CreateRestrictedRole", RoleName=role_name,
            AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
        check_result(
            self.invoke(
                Action="PutRestrictedRolePolicy", RoleName="",
                PolicyName="inline1",
                PolicyDocument=json_dumps(POWER_USER_POLICY)),
            "Invalid length for parameter RoleName, value: 0")
        check_result(
            self.invoke(
                Action="PutRestrictedRolePolicy", RoleName=1,
                PolicyName="inline1",
                PolicyDocument=json_dumps(POWER_USER_POLICY)),
            "Invalid type for parameter RoleName, value: 1")
        check_result(
            self.invoke(
                Action="PutRestrictedRolePolicy", RoleName=role_name,
                PolicyName="",
                PolicyDocument=json_dumps(POWER_USER_POLICY)),
            "Invalid length for parameter PolicyName, value: 0")
        check_result(
            self.invoke(
                Action="PutRestrictedRolePolicy", RoleName=role_name,
                PolicyName=1,
                PolicyDocument=json_dumps(POWER_USER_POLICY)),
            "Invalid type for parameter PolicyName, value: 1")
        check_result(
            self.invoke(
                Action="PutRestrictedRolePolicy", RoleName=role_name,
                PolicyName="inline1", PolicyDocument=""),
            "Invalid length for parameter PolicyDocument, value: 0")
        check_result(
            self.invoke(
                Action="PutRestrictedRolePolicy", RoleName=role_name,
                PolicyName="inline1", PolicyDocument=1),
            "Invalid type for parameter PolicyDocument, value: 1")
        check_result(
            self.invoke(
                Action="DeleteRestrictedRolePolicy", RoleName="",
                PolicyName="inline1"),
            "Invalid length for parameter RoleName, value: 0")
        check_result(
            self.invoke(
                Action="DeleteRestrictedRolePolicy", RoleName=1,
                PolicyName="inline1"),
            "Invalid type for parameter RoleName, value: 1")
        check_result(
            self.invoke(
                Action="DeleteRestrictedRolePolicy", RoleName=role_name,
                PolicyName=""),
            "Invalid length for parameter PolicyName, value: 0")
        check_result(
            self.invoke(
                Action="DeleteRestrictedRolePolicy", RoleName=role_name,
                PolicyName=1),
            "Invalid type for parameter PolicyName, value: 1")
    def test_missing_environ(self):
        """Every action fails with InternalFailure when the
        MANDATORY_ROLE_POLICY_ARN environment variable is missing."""
        def check_result(result):
            self.assertIn("Error", result)
            self.assertEqual(result["Error"]["Code"], "InternalFailure")
            self.assertEqual(result["Error"]["Type"], "Receiver")
            self.assertIn(
                "Environment variable MANDATORY_ROLE_POLICY_ARN has not "
                "been set on the Lambda function.",
                result["Error"]["Message"])
        # Create a role while the environment is still intact.
        result = self.invoke(
            Action="CreateRestrictedRole", RoleName="ok-role",
            AssumeRolePolicyDocument=json_dumps(OPEN_MANDATORY_POLICY))
        self.assertNotIn("Error", result)
        del environ["MANDATORY_ROLE_POLICY_ARN"]
        try:
            check_result(self.invoke(
                Action="CreateRestrictedRole", RoleName="test-role-missing-env",
                AssumeRolePolicyDocument="{}", Path="/", Description=""))
            check_result(self.invoke(
                Action="DeleteRestrictedRole", RoleName="ok-role"))
            check_result(self.invoke(
                Action="AttachRestrictedRolePolicy", RoleName="ok-role",
                PolicyArn=self.power_arn))
            check_result(self.invoke(
                Action="DetachRestrictedRolePolicy", RoleName="ok-role",
                PolicyArn=self.power_arn))
            check_result(self.invoke(
                Action="PutRestrictedRolePolicy", RoleName="ok-role",
                PolicyName="inline1",
                PolicyDocument=json_dumps(POWER_USER_POLICY)))
            check_result(self.invoke(
                Action="DeleteRestrictedRolePolicy", RoleName="ok-role",
                PolicyName="inline1"))
            check_result(self.invoke(
                Action="UpdateRestrictedRoleDescription", RoleName="ok-role",
                Description="A new description"))
            check_result(self.invoke(
                Action="UpdateAssumeRestrictedRolePolicy", RoleName="ok-role",
                PolicyDocument=json_dumps(OPEN_MANDATORY_POLICY)))
        finally:
            # Restore so later tests see a working environment.
            environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn
    def test_bad_mandatory_policy(self):
        """An unattachable mandatory policy ARN rolls role creation back."""
        invalid = "arn:aws:iam::aws:invalid-policy-name"
        environ["MANDATORY_ROLE_POLICY_ARN"] = invalid
        try:
            result = self.invoke(
                Action="CreateRestrictedRole", RoleName="test-role-bad-mand",
                AssumeRolePolicyDocument="{}", Path="/", Description="")
            self.assertIn("Error", result)
            self.assertEqual(result["Error"]["Code"], "InternalFailure")
            self.assertEqual(result["Error"]["Type"], "Receiver")
            self.assertIn(
                "Unable to attach MANDATORY_ROLE_POLICY_ARN %s "
                "to newly created role." % invalid, result["Error"]["Message"])
            # The half-created role must have been cleaned up.
            with self.assertRaises(BotoClientError):
                self.iam.get_role(RoleName="test-role-bad-mand")
        finally:
            environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn
def test_delete_non_restricted_role(self):
self.iam.create_role(
RoleName="ok-role-non-restrict",
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
result = self.invoke(
Action="DeleteRestrictedRole", RoleName="ok-role-non-restrict")
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn("Role ok-role-non-restrict is not a restricted "
"role.", result["Error"]["Message"])
def test_unknown_action(self):
result = self.invoke(Action="NotAnAction")
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidAction")
    def test_unknown_parameters(self):
        """Unexpected parameters are rejected with InvalidParameterValue."""
        result = self.invoke(Action="CreateRestrictedRole", Invalid=1)
        self.assertIn("Error", result)
        self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
        self.assertIn("Unknown parameter(s): Invalid",
                      result["Error"]["Message"])
|
dacut/rolemaker
|
tests/test_handler.py
|
Python
|
apache-2.0
| 45,942
|
"""Home Assistant auth provider."""
from __future__ import annotations
import asyncio
import base64
from collections.abc import Mapping
import logging
from typing import Any, cast
import bcrypt
import voluptuous as vol
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
# mypy: disallow-any-generics
STORAGE_VERSION = 1
STORAGE_KEY = "auth_provider.homeassistant"
def _disallow_id(conf: dict[str, Any]) -> dict[str, Any]:
    """Validate that the provider config carries no ID entry."""
    if CONF_ID not in conf:
        return conf
    raise vol.Invalid("ID is not allowed for the homeassistant auth provider.")
CONFIG_SCHEMA = vol.All(AUTH_PROVIDER_SCHEMA, _disallow_id)
@callback
def async_get_provider(hass: HomeAssistant) -> HassAuthProvider:
    """Return the configured homeassistant auth provider."""
    for provider in hass.auth.auth_providers:
        if provider.type == "homeassistant":
            return cast(HassAuthProvider, provider)
    raise RuntimeError("Provider not found")
# Raised by Data.validate_login; deliberately carries no detail about which
# part of the credentials was wrong.
class InvalidAuth(HomeAssistantError):
    """Raised when we encounter invalid authentication."""
# Raised by Data mutation helpers (add/remove/change) on unknown/duplicate
# usernames; never raised while validating a login.
class InvalidUser(HomeAssistantError):
    """Raised when invalid user is specified.

    Will not be raised when validating authentication.
    """
class Data:
    """Hold the user data."""

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize the user data store."""
        self.hass = hass
        self._store = hass.helpers.storage.Store(
            STORAGE_VERSION, STORAGE_KEY, private=True
        )
        self._data: dict[str, Any] | None = None
        # Legacy mode will allow usernames to start/end with whitespace
        # and will compare usernames case-insensitive.
        # Remove in 2020 or when we launch 1.0.
        self.is_legacy = False

    @callback
    def normalize_username(self, username: str) -> str:
        """Normalize a username based on the mode."""
        if self.is_legacy:
            return username

        return username.strip().casefold()

    async def async_load(self) -> None:
        """Load stored data."""
        if (data := await self._store.async_load()) is None:
            data = {"users": []}

        seen: set[str] = set()

        for user in data["users"]:
            username = user["username"]

            # check if we have duplicates
            if (folded := username.casefold()) in seen:
                self.is_legacy = True

                logging.getLogger(__name__).warning(
                    "Home Assistant auth provider is running in legacy mode "
                    "because we detected usernames that are case-insensitive"
                    "equivalent. Please change the username: '%s'.",
                    username,
                )

                break

            seen.add(folded)

            # check if we have unstripped usernames
            if username != username.strip():
                self.is_legacy = True

                logging.getLogger(__name__).warning(
                    "Home Assistant auth provider is running in legacy mode "
                    "because we detected usernames that start or end in a "
                    "space. Please change the username: '%s'.",
                    username,
                )

                break

        self._data = data

    @property
    def users(self) -> list[dict[str, str]]:
        """Return users."""
        return self._data["users"]  # type: ignore

    def validate_login(self, username: str, password: str) -> None:
        """Validate a username and password.

        Raises InvalidAuth if auth invalid.
        """
        username = self.normalize_username(username)
        # Fixed bcrypt hash used when the user does not exist so the check
        # below costs the same either way.
        dummy = b"$2b$12$CiuFGszHx9eNHxPuQcwBWez4CwDTOcLTX5CbOpV6gef2nYuXkY7BO"
        found = None

        # Compare all users to avoid timing attacks.
        for user in self.users:
            if self.normalize_username(user["username"]) == username:
                found = user

        if found is None:
            # check a hash to make timing the same as if user was found
            bcrypt.checkpw(b"foo", dummy)
            raise InvalidAuth

        user_hash = base64.b64decode(found["password"])

        # bcrypt.checkpw is timing-safe
        if not bcrypt.checkpw(password.encode(), user_hash):
            raise InvalidAuth

    def hash_password(  # pylint: disable=no-self-use
        self, password: str, for_storage: bool = False
    ) -> bytes:
        """Encode a password.

        With for_storage=True the bcrypt hash is additionally
        base64-encoded so it can be stored as JSON text.
        """
        hashed: bytes = bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds=12))

        if for_storage:
            hashed = base64.b64encode(hashed)
        return hashed

    def add_auth(self, username: str, password: str) -> None:
        """Add a new authenticated user/pass.

        Raises InvalidUser if the (normalized) username already exists.
        """
        username = self.normalize_username(username)

        if any(
            self.normalize_username(user["username"]) == username for user in self.users
        ):
            raise InvalidUser

        self.users.append(
            {
                "username": username,
                "password": self.hash_password(password, True).decode(),
            }
        )

    @callback
    def async_remove_auth(self, username: str) -> None:
        """Remove authentication.

        Raises InvalidUser if the username is not present.
        """
        username = self.normalize_username(username)

        index = None
        for i, user in enumerate(self.users):
            if self.normalize_username(user["username"]) == username:
                index = i
                break

        if index is None:
            raise InvalidUser

        self.users.pop(index)

    def change_password(self, username: str, new_password: str) -> None:
        """Update the password.

        Raises InvalidUser if user cannot be found.
        """
        username = self.normalize_username(username)

        for user in self.users:
            if self.normalize_username(user["username"]) == username:
                user["password"] = self.hash_password(new_password, True).decode()
                break
        else:
            raise InvalidUser

    async def async_save(self) -> None:
        """Save data."""
        await self._store.async_save(self._data)
@AUTH_PROVIDERS.register("homeassistant")
class HassAuthProvider(AuthProvider):
    """Auth provider based on a local storage of users in Home Assistant config dir."""

    DEFAULT_TITLE = "Home Assistant Local"

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize an Home Assistant auth provider."""
        super().__init__(*args, **kwargs)
        self.data: Data | None = None
        self._init_lock = asyncio.Lock()

    async def async_initialize(self) -> None:
        """Initialize the auth provider."""
        async with self._init_lock:
            # Re-check under the lock so concurrent initializers load once.
            if self.data is not None:
                return

            data = Data(self.hass)
            await data.async_load()
            self.data = data

    async def async_login_flow(self, context: dict[str, Any] | None) -> LoginFlow:
        """Return a flow to login."""
        return HassLoginFlow(self)

    async def async_validate_login(self, username: str, password: str) -> None:
        """Validate a username and password."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None

        # bcrypt verification is blocking work; run it off the event loop.
        await self.hass.async_add_executor_job(
            self.data.validate_login, username, password
        )

    async def async_add_auth(self, username: str, password: str) -> None:
        """Call add_auth on data."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None

        await self.hass.async_add_executor_job(self.data.add_auth, username, password)
        await self.data.async_save()

    async def async_remove_auth(self, username: str) -> None:
        """Call remove_auth on data."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None

        self.data.async_remove_auth(username)
        await self.data.async_save()

    async def async_change_password(self, username: str, new_password: str) -> None:
        """Call change_password on data."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None

        await self.hass.async_add_executor_job(
            self.data.change_password, username, new_password
        )
        await self.data.async_save()

    async def async_get_or_create_credentials(
        self, flow_result: Mapping[str, str]
    ) -> Credentials:
        """Get credentials based on the flow result."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None

        norm_username = self.data.normalize_username
        username = norm_username(flow_result["username"])

        for credential in await self.async_credentials():
            if norm_username(credential.data["username"]) == username:
                return credential

        # Create new credentials.
        return self.async_create_credentials({"username": username})

    async def async_user_meta_for_credentials(
        self, credentials: Credentials
    ) -> UserMeta:
        """Get extra info for this credential."""
        return UserMeta(name=credentials.data["username"], is_active=True)

    async def async_will_remove_credentials(self, credentials: Credentials) -> None:
        """When credentials get removed, also remove the auth."""
        if self.data is None:
            await self.async_initialize()
            assert self.data is not None

        try:
            self.data.async_remove_auth(credentials.data["username"])
            await self.data.async_save()
        except InvalidUser:
            # Can happen if somehow we didn't clean up a credential
            pass
class HassLoginFlow(LoginFlow):
    """Handler for the login flow."""

    async def async_step_init(
        self, user_input: dict[str, str] | None = None
    ) -> FlowResult:
        """Handle the step of the form."""
        errors = {}

        if user_input is not None:
            try:
                await cast(HassAuthProvider, self._auth_provider).async_validate_login(
                    user_input["username"], user_input["password"]
                )
            except InvalidAuth:
                errors["base"] = "invalid_auth"

            if not errors:
                # Do not keep the plaintext password in the flow result.
                user_input.pop("password")
                return await self.async_finish(user_input)

        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Required("username"): str,
                    vol.Required("password"): str,
                }
            ),
            errors=errors,
        )
|
aronsky/home-assistant
|
homeassistant/auth/providers/homeassistant.py
|
Python
|
apache-2.0
| 11,050
|
# Copyright 2014 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Inverse kinematics for 3dof legs.'''
import math
from ..tf.tf import Point3D
class Configuration(object):
    """Geometry and servo limits for one 3-DOF lizard-style leg.

    Angles are in degrees, lengths in millimeters.  Each joint has a
    min/idle/max angle range, a link length, a direction sign (+/-1) and
    the identifier of the servo driving it.
    """
    coxa_min_deg = None
    coxa_idle_deg = None
    coxa_max_deg = None
    coxa_length_mm = None
    coxa_sign = 1
    coxa_ident = None

    femur_min_deg = None
    femur_idle_deg = None
    femur_max_deg = None
    femur_length_mm = None
    femur_sign = 1
    femur_ident = None

    tibia_min_deg = None
    tibia_idle_deg = None
    tibia_max_deg = None
    tibia_length_mm = None
    tibia_sign = 1
    tibia_ident = None

    servo_speed_dps = 360.0

    @staticmethod
    def get_attributes():
        """Return the names of all configurable data attributes."""
        # dict.items() instead of the Python 2-only iteritems() -- works
        # on both Python 2 and Python 3.
        return [key for key, value in Configuration.__dict__.items()
                if (not key.startswith('__') and
                    not callable(value) and
                    not isinstance(value, staticmethod))]

    def write_settings(self, config, group_name):
        """Write every attribute into `config` under section `group_name`."""
        config.add_section(group_name)
        for name in Configuration.get_attributes():
            config.set(group_name, name, getattr(self, name))

    @staticmethod
    def read_settings(config, group_name):
        """Build a Configuration from `config` section `group_name`.

        Options missing from the section keep their class defaults.
        """
        result = Configuration()
        for name in Configuration.get_attributes():
            if config.has_option(group_name, name):
                # Signs and servo idents are integers; everything else is
                # a float.
                if name.endswith('sign') or name.endswith('ident'):
                    value = config.getint(group_name, name)
                else:
                    value = config.getfloat(group_name, name)
                setattr(result, name, value)
        return result
class JointAngles(object):
    """A single leg pose expressed as three joint angles in degrees."""

    config = None
    # Sign conventions: coxa positive rotates clockwise viewed from the
    # top; femur and tibia positive rotate the limb upward.
    coxa_deg = None
    femur_deg = None
    tibia_deg = None

    def command_dict(self):
        '''Return a dictionary mapping servo identifiers to commands
        in degrees.  This is the same format as the servo_controller
        module uses.'''
        cfg = self.config
        return {
            cfg.coxa_ident: self.coxa_deg,
            cfg.femur_ident: self.femur_deg,
            cfg.tibia_ident: self.tibia_deg,
        }
def lizard_3dof_ik(point_mm, config):
    '''Given a target end position in 3D coordinate space, return the
    required joint angles for a 3 degree of freedom lizard style
    leg.

    +y is away from the shoulder
    +x is clockwise from shoulder
    +z is up

    If no solution is possible, return None.
    '''
    # Solve for the coxa first, as it has only a single solution.
    coxa_deg = (config.coxa_sign *
                math.degrees(math.atan2(point_mm.x, point_mm.y)) +
                config.coxa_idle_deg)

    if (coxa_deg < config.coxa_min_deg or
        coxa_deg > config.coxa_max_deg):
        return None

    # x-coordinate of femur/tibia pair after rotating to 0 coxa
    true_x = (math.sqrt(point_mm.x ** 2 + point_mm.y ** 2) -
              config.coxa_length_mm)

    im = math.sqrt(point_mm.z ** 2 + true_x ** 2)

    # The new femur/tibia pair makes a triangle where the 3rd side is
    # the hypotenuse of the right triangle composed of z and im, lets
    # call it c.
    #
    #           --\  femur
    #           |\ --\
    #           | \   --\
    #           |  \     --|
    #          z|  im\      | tibia
    #           |     --\   |
    #           ----------
    #            true_x
    #
    #  im = math.sqrt(z ** 2 + true_x ** 2)
    #
    # Then, we can use the law of cosines to find the angle opposite
    # im, which is the angle between the femur and tibia.
    #
    #  im ** 2 = a ** 2 + b ** 2 + 2 * a * b * cos(C)
    #
    # Solving for C yields:
    #
    #  C = acos((im ** 2 - a ** 2 - b ** 2) / (2 * a * b))
    tibia_cos = ((im ** 2 -
                  config.tibia_length_mm ** 2 -
                  config.femur_length_mm ** 2) /
                 (2 * config.tibia_length_mm * config.femur_length_mm))
    # |cos| > 1 means the target is beyond (or inside) the leg's reach.
    if tibia_cos < -1.0 or tibia_cos > 1.0:
        return None

    # For our purposes, a 0 tibia angle should equate to a right angle
    # with the femur, so subtract off 90 degrees.
    tibia_deg = (config.tibia_sign *
                 math.degrees(0.5 * math.pi - math.acos(tibia_cos)) +
                 config.tibia_idle_deg)
    if (tibia_deg < config.tibia_min_deg or
        tibia_deg > config.tibia_max_deg):
        return None

    # To solve for the femur angle, we first get the angle opposite
    # true_x, then the angle opposite the tibia.
    true_x_deg = math.degrees(math.atan2(true_x, -point_mm.z))

    # Then the angle opposite the tibia is also found the via the law
    # of cosines.
    #
    #  tibia ** 2 = femur ** 2 + im ** 2 + 2 * femur * im * cos(femur_im)
    #
    #  femur_im = acos ( (tibia ** 2 - im ** 2 - femur ** 2) /
    #                    (2 * femur * im) )
    femur_im_cos = -(config.tibia_length_mm ** 2 -
                     config.femur_length_mm ** 2 -
                     im ** 2) / (2 * config.femur_length_mm * im)
    if femur_im_cos < -1.0 or femur_im_cos > 1.0:
        return None

    femur_im_deg = math.degrees(math.acos(femur_im_cos))

    femur_deg = (config.femur_sign * ((femur_im_deg + true_x_deg) - 90.0) +
                 config.femur_idle_deg)
    if (femur_deg < config.femur_min_deg or
        femur_deg > config.femur_max_deg):
        return None

    # All three joints are within their limits; package the solution.
    result = JointAngles()
    result.config = config
    result.coxa_deg = coxa_deg
    result.femur_deg = femur_deg
    result.tibia_deg = tibia_deg

    return result
class LizardIk(object):
    """Inverse kinematics helper for a single lizard-style 3-DOF leg."""

    def __init__(self, config):
        # config: a Configuration describing the leg geometry and servos.
        self.config = config

    def do_ik(self, point_mm):
        """Return the JointAngles reaching point_mm, or None if unreachable."""
        return lizard_3dof_ik(point_mm, self.config)

    def worst_case_speed_mm_s(self, point_mm, direction_mm=None):
        '''Return the worst case linear velocity the end effector can
        achieve in the given orientation.

        If direction_mm is given only that direction is probed; otherwise
        each cartesian axis is probed.  Returns None if the point (or a
        probe point next to it) is unreachable.'''
        step = 0.01
        nominal = self.do_ik(point_mm)
        if nominal is None:
            return None

        servo_step = step * self.config.servo_speed_dps
        result = None

        def update(result, advanced_servo_deg, nominal_servo_deg):
            # A joint that does not move imposes no speed limit; keep the
            # bound accumulated so far.  (Bare "return" here previously
            # discarded bounds computed from the other joints.)
            if advanced_servo_deg == nominal_servo_deg:
                return result
            this_speed = (servo_step /
                          abs(advanced_servo_deg - nominal_servo_deg))
            if result is None or this_speed < result:
                result = this_speed
            return result

        if direction_mm:
            normalized = direction_mm.scaled(1.0 / direction_mm.length())
            consider = [normalized.scaled(step)]
        else:
            # Parenthesized tuple sequence: the bare form was Python 2-only
            # syntax and fails to parse under Python 3.
            consider = [Point3D(*val) for val in
                        ((step, 0., 0.), (0., step, 0.), (0., 0., step))]

        for advance in consider:
            advanced = self.do_ik(point_mm + advance)
            if advanced is None:
                return None
            result = update(result, advanced.coxa_deg, nominal.coxa_deg)
            result = update(result, advanced.femur_deg, nominal.femur_deg)
            result = update(result, advanced.tibia_deg, nominal.tibia_deg)

        return result

    def servo_speed_dps(self):
        """Return the configured servo speed in degrees per second."""
        return self.config.servo_speed_dps

    def largest_change_deg(self, result1, result2):
        """Return the largest per-joint angular difference between poses."""
        return max(abs(result1.coxa_deg - result2.coxa_deg),
                   abs(result1.femur_deg - result2.femur_deg),
                   abs(result1.tibia_deg - result2.tibia_deg))
|
Syralist/yet-another-hexapod
|
hexapy/legtool/ik/gait/leg_ik.py
|
Python
|
mit
| 7,942
|
from __future__ import absolute_import
from django.test import TestCase
from .models import Reporter, Article
class ManyToOneNullTests(TestCase):
    """Tests for nullable ForeignKey (many-to-one) relations."""

    def setUp(self):
        # Create a Reporter.
        self.r = Reporter(name='John Smith')
        self.r.save()
        # Create an Article.
        self.a = Article(headline="First", reporter=self.r)
        self.a.save()
        # Create an Article via the Reporter object.
        self.a2 = self.r.article_set.create(headline="Second")
        # Create an Article with no Reporter by passing "reporter=None".
        self.a3 = Article(headline="Third", reporter=None)
        self.a3.save()
        # Create another article and reporter
        self.r2 = Reporter(name='Paul Jones')
        self.r2.save()
        self.a4 = self.r2.article_set.create(headline='Fourth')

    def test_get_related(self):
        """Forward access from an article to its reporter."""
        self.assertEqual(self.a.reporter.id, self.r.id)
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)

    def test_created_via_related_set(self):
        """article_set.create() links the new article back to the reporter."""
        self.assertEqual(self.a2.reporter.id, self.r.id)

    def test_related_set(self):
        """Reverse manager supports all(), filter() and count()."""
        # Reporter objects have access to their related Article objects.
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>'])
        self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'),
                                 ['<Article: First>'])
        self.assertEqual(self.r.article_set.count(), 2)

    def test_created_without_related(self):
        """Articles with a null reporter, and moving them into/out of a set."""
        self.assertEqual(self.a3.reporter, None)
        # Need to reget a3 to refresh the cache
        a3 = Article.objects.get(pk=self.a3.pk)
        self.assertRaises(AttributeError, getattr, a3.reporter, 'id')
        # Accessing an article's 'reporter' attribute returns None
        # if the reporter is set to None.
        self.assertEqual(a3.reporter, None)
        # To retrieve the articles with no reporters set, use "reporter__isnull=True".
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
                                 ['<Article: Third>'])
        # We can achieve the same thing by filtering for the case where the
        # reporter is None.
        self.assertQuerysetEqual(Article.objects.filter(reporter=None),
                                 ['<Article: Third>'])
        # Set the reporter for the Third article
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>'])
        self.r.article_set.add(a3)
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>', '<Article: Third>'])
        # Remove an article from the set, and check that it was removed.
        self.r.article_set.remove(a3)
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>'])
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
                                 ['<Article: Third>'])

    def test_remove_from_wrong_set(self):
        """remove() refuses articles that belong to a different reporter."""
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
        # Try to remove a4 from a set it does not belong to
        self.assertRaises(Reporter.DoesNotExist, self.r.article_set.remove, self.a4)
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])

    def test_assign_clear_related_set(self):
        """Assigning/clearing the reverse set nulls out displaced rows."""
        # Use descriptor assignment to allocate ForeignKey. Null is legal, so
        # existing members of set that are not in the assignment set are set null
        self.r2.article_set = [self.a2, self.a3]
        self.assertQuerysetEqual(self.r2.article_set.all(),
                                 ['<Article: Second>', '<Article: Third>'])
        # Clear the rest of the set
        self.r.article_set.clear()
        self.assertQuerysetEqual(self.r.article_set.all(), [])
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
                                 ['<Article: First>', '<Article: Fourth>'])

    def test_clear_efficiency(self):
        """clear() must issue a single UPDATE regardless of set size."""
        r = Reporter.objects.create()
        for _ in range(3):
            r.article_set.create()
        with self.assertNumQueries(1):
            r.article_set.clear()
        self.assertEqual(r.article_set.count(), 0)
|
openhatch/new-mini-tasks
|
vendor/packages/Django/tests/modeltests/many_to_one_null/tests.py
|
Python
|
apache-2.0
| 4,478
|
__author__ = "Hannes Hoettinger"

import numpy as np
import cv2
import time
import cv2.cv as cv
import math
import pickle

# Development fixtures.  NOTE(review): absolute Windows paths -- these only
# resolve on the original author's machine.
img = cv2.imread("D:\Projekte\PycharmProjects\DartsScorer\Darts\Dartboard_2.png")
img2 = cv2.imread("D:\Projekte\PycharmProjects\DartsScorer\Darts\Dartboard_3.png")
vidcap = cv2.VideoCapture("C:\Users\hanne\OneDrive\Projekte\GitHub\darts\Darts\Darts_Testvideo_9_1.mp4")

from_video = True
DEBUG = True

winName = "test2"

# Calibration state: populated by DartLocation() from the pickled
# calibration file and read by the scoring code.
center_dartboard = []
ring_radius = []
transformation_matrix = []
class dartThrow:
    """Result of a single dart hit: score base/multiplier plus polar coords."""

    def __init__(self):
        # -1 marks "not yet evaluated" for every field.
        for attr in ("base", "multiplier", "magnitude", "angle"):
            setattr(self, attr, -1)
# Persisted to disk via pickle (file IO).
class CalibrationData:
    """Dartboard calibration: transform corners, reference angle, radii."""

    def __init__(self):
        # All fields start as fresh empty lists:
        #  top/bottom/left/right      - points for the perspective transform
        #  init_point_arr             - points for computing the first angle
        #  center_dartboard           - board center
        #  ref_angle                  - initial angle of the 20/1 divider
        #  ring_radius                - radii of the 6 scoring rings
        #  transformationMatrix       - saved perspective transform
        for attr in ("top", "bottom", "left", "right",
                     "init_point_arr", "center_dartboard",
                     "ref_angle", "ring_radius", "transformationMatrix"):
            setattr(self, attr, [])
## improve and make circle radius accessible
def drawBoard():
    """Render a schematic 800x800 dartboard outline and return the image.

    Returns a black float image with the scoring ring circles and the 20
    sector separator lines drawn in white, centered at (400, 400).
    """
    raw_loc_mat = np.zeros((800, 800, 3))

    # draw board
    cv2.circle(raw_loc_mat, (400, 400), 170 * 2, (255, 255, 255), 1)  # outside double
    cv2.circle(raw_loc_mat, (400, 400), 160 * 2, (255, 255, 255), 1)  # inside double
    cv2.circle(raw_loc_mat, (400, 400), 107 * 2, (255, 255, 255), 1)  # outside treble
    cv2.circle(raw_loc_mat, (400, 400), 97 * 2, (255, 255, 255), 1)  # inside treble
    cv2.circle(raw_loc_mat, (400, 400), 16 * 2, (255, 255, 255), 1)  # 25
    cv2.circle(raw_loc_mat, (400, 400), 7 * 2, (255, 255, 255), 1)  # Bulls eye

    # 20 sectors...
    sectorangle = 2 * math.pi / 20
    i = 0
    while (i < 20):
        # Separator lines sit half a sector off the axis so the sectors
        # are centered on their wedges.
        cv2.line(raw_loc_mat, (400, 400), (
            int(400 + 170 * 2 * math.cos((0.5 + i) * sectorangle)),
            int(400 + 170 * 2 * math.sin((0.5 + i) * sectorangle))), (255, 255, 255), 1)
        i = i + 1

    return raw_loc_mat
def dist(x1,y1, x2,y2, x3,y3): # x3,y3 is the point
    """Return the shortest distance from point (x3,y3) to the line
    segment (x1,y1)-(x2,y2).

    Handles the degenerate zero-length segment (both endpoints equal) by
    returning the distance to that single point; the original code
    raised ZeroDivisionError in that case.
    """
    px = x2 - x1
    py = y2 - y1
    seg_len_sq = float(px * px + py * py)
    if seg_len_sq == 0.0:
        # Degenerate segment: project onto the single endpoint.
        u = 0.0
    else:
        u = ((x3 - x1) * px + (y3 - y1) * py) / seg_len_sq
    # Clamp the projection parameter onto the segment.
    u = max(0.0, min(1.0, u))
    # Closest point on the segment, then its offset from (x3, y3).
    dx = x1 + u * px - x3
    dy = y1 + u * py - y3
    # Note: if only relative comparisons are needed, the sqrt could be
    # skipped for a little performance.
    return math.sqrt(dx * dx + dy * dy)
def DartLocation(x_coord, y_coord):
    """Map a raw camera hit point to board coordinates via the saved calibration.

    Loads 'calibrationData.pkl', refreshes the module-level calibration
    globals (transformation_matrix, ring_radius, center_dartboard) and
    applies the stored perspective transform to the single point.

    Returns the transformed (x, y) tuple on success, (-1, -1) if the
    calibration data is malformed, or (-2, -2) if not calibrated.
    """
    try:
        # NOTE(review): pickle is only safe while this file cannot be
        # replaced by untrusted input.
        with open('calibrationData.pkl', 'rb') as calFile:
            calData = pickle.load(calFile)

        # Publish the calibration into the module-level globals.
        global transformation_matrix
        transformation_matrix = calData.transformationMatrix

        global ring_radius
        # Replace the contents instead of appending, so repeated calls do
        # not accumulate duplicate radii in the module-level list.
        ring_radius[:] = calData.ring_radius[:6]

        global center_dartboard
        center_dartboard = calData.center_dartboard

        # Transform only the hit point with the saved transformation
        # matrix; perspectiveTransform expects shape (N, 1, 2) float32.
        dart_loc_temp = np.array([[[x_coord, y_coord]]], dtype="float32")
        dart_loc = cv2.perspectiveTransform(dart_loc_temp, transformation_matrix)
        return tuple(dart_loc.reshape(1, -1)[0])

    # Calibration file present but missing expected fields.
    except AttributeError as err1:
        print(err1)  # parenthesized print works on Python 2 and 3
        return (-1, -1)
    # System not calibrated.
    except NameError as err2:
        print(err2)
        return (-2, -2)
#Returns dartThrow (score, multiplier, angle, magnitude) based on x,y location
def DartRegion(dart_loc):
    """
    Classify a board-coordinate hit point into a dartThrow (base score,
    multiplier, angle, magnitude) using the calibrated ring radii and
    board centre globals.

    Returns the global dartInfo; on a calibration error (AttributeError /
    NameError) a fresh, empty dartThrow is returned instead.
    """
    try:
        # NOTE(review): height/width appear unused in this function.
        height = 800
        width = 800
        global dartInfo
        dartInfo = dartThrow()
        #find the magnitude and angle of the dart
        # (vy is flipped because image y grows downwards)
        vx = (dart_loc[0] - center_dartboard[0])
        vy = (center_dartboard[1] - dart_loc[1])
        # reference angle for atan2 conversion
        ref_angle = 81
        dart_magnitude = math.sqrt(math.pow(vx, 2) + math.pow(vy, 2))
        dart_angle = math.fmod(((math.atan2(vy,vx) * 180/math.pi) + 360 - ref_angle), 360)
        dartInfo.magnitude = dart_magnitude
        dartInfo.angle = dart_angle
        # 20 sectors of 18 degrees each; index 0..19 from the reference angle
        angleDiffMul = int((dart_angle) / 18.0)
        print vx, vy, dart_angle
        #starting from the 20 points
        # Map the sector index onto the dartboard's base score ordering.
        if angleDiffMul == 19:
            dartInfo.base = 20
        elif angleDiffMul == 0:
            dartInfo.base = 5
        elif angleDiffMul == 1:
            dartInfo.base = 12
        elif angleDiffMul == 2:
            dartInfo.base = 9
        elif angleDiffMul == 3:
            dartInfo.base = 14
        elif angleDiffMul == 4:
            dartInfo.base = 11
        elif angleDiffMul == 5:
            dartInfo.base = 8
        elif angleDiffMul == 6:
            dartInfo.base = 16
        elif angleDiffMul == 7:
            dartInfo.base = 7
        elif angleDiffMul == 8:
            dartInfo.base = 19
        elif angleDiffMul == 9:
            dartInfo.base = 3
        elif angleDiffMul == 10:
            dartInfo.base = 17
        elif angleDiffMul == 11:
            dartInfo.base = 2
        elif angleDiffMul == 12:
            dartInfo.base = 15
        elif angleDiffMul == 13:
            dartInfo.base = 10
        elif angleDiffMul == 14:
            dartInfo.base = 6
        elif angleDiffMul == 15:
            dartInfo.base = 13
        elif angleDiffMul == 16:
            dartInfo.base = 4
        elif angleDiffMul == 17:
            dartInfo.base = 18
        elif angleDiffMul == 18:
            dartInfo.base = 1
        else:
            #something went wrong
            dartInfo.base = -300
        #Calculating multiplier (and special cases for Bull's Eye):
        for i in range(0, len(ring_radius)):
            #Find the ring that encloses the dart
            if dartInfo.magnitude <= ring_radius[i]:
                #Bull's eye, adjust base score
                if i == 0:
                    dartInfo.base = 25
                    dartInfo.multiplier = 2
                elif i == 1:
                    dartInfo.base = 25
                    dartInfo.multiplier = 1
                #triple ring
                elif i == 3:
                    dartInfo.multiplier = 3
                #double ring
                elif i == 5:
                    dartInfo.multiplier = 2
                #single
                elif i == 2 or i == 4:
                    dartInfo.multiplier = 1
                #finished calculation
                break
        #miss - outside the outer double ring
        if dartInfo.magnitude > ring_radius[5]:
            dartInfo.base = 0
            dartInfo.multiplier = 0
        return dartInfo
    #system not calibrated
    except AttributeError as err1:
        print err1
        dartInfo = dartThrow()
        return dartInfo
    except NameError as err2:
        #not calibrated error
        print err2
        dartInfo = dartThrow()
        return dartInfo
#if breaker == 3:
# break
def getDart():
    """
    Watch the camera feed for frame differences caused by a dart hit,
    locate the dart tip, score it and accumulate the global finalScore.

    Loops until a very large frame difference (player entering the zone)
    or the ESC key. Uses DartLocation()/DartRegion() for the calibrated
    mapping and shows debug windows when DEBUG is set.
    """
    global finalScore
    global transformation_matrix
    debug_img = drawBoard()
    finalScore = 0
    count = 0
    # breaker counts the darts detected so far (frame2..4.jpg are saved per dart)
    breaker = 0
    success = 1
    ## threshold important -> make accessible
    # minimum changed-pixel count that indicates a dart (upper bound below)
    x = 3000
    # Read first image twice (issue somewhere) to start loop:
    t = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
    # wait for camera
    time.sleep(0.1)
    t = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
    while success:
        time.sleep(0.1)
        success,image = vidcap.read()
        t_plus = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
        dimg = cv2.absdiff(t, t_plus)
        # cv2.imshow(winName, edges(t_minus, t, t_plus))
        blur = cv2.GaussianBlur(dimg,(5,5),0)
        blur = cv2.bilateralFilter(blur,9,75,75)
        ret, thresh = cv2.threshold(blur, 60, 255, 0)
        if cv2.countNonZero(thresh) > x and cv2.countNonZero(thresh) < 15000: ## threshold important -> make accessible
            # Let the dart settle / skip motion-blurred frames before re-reading.
            if from_video:
                t_plus = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
                t_plus = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
                t_plus = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
                t_plus = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
                t_plus = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
            else:
                time.sleep(0.2)
                t_plus = cv2.cvtColor(vidcap.read()[1], cv2.COLOR_RGB2GRAY)
            cv2.imshow(winName, t_plus)
            dimg = cv2.absdiff(t, t_plus)
            ## kernel size important -> make accessible
            # filter noise from image distortions
            kernel = np.ones((8, 8), np.float32) / 40
            blur = cv2.filter2D(dimg, -1, kernel)
            #blur = cv2.GaussianBlur(dimg,(3,3),1)
            #blur = cv2.bilateralFilter(blur,3,10,70) # 10,70
            # remove image distortions
            #kernel = np.ones((1, 1), np.uint8)
            #blur = cv2.morphologyEx(blur, cv2.MORPH_OPEN, kernel)
            #kernel = np.ones((1, 1), np.uint8)
            # blur = cv2.dilate(blur, kernel, iterations=2)
            #blur = cv2.erode(blur, kernel, iterations=1)
            # number of features to track is a distinctive feature
            #edges = cv2.goodFeaturesToTrack(blur,200,0.01,0,mask=None, blockSize=2, useHarrisDetector=1, k=0.001)
            ## FeaturesToTrack important -> make accessible
            edges = cv2.goodFeaturesToTrack(blur,640,0.0008,3,mask=None, blockSize=3, useHarrisDetector=1, k=0.06) # k=0.08
            corners = np.int0(edges)
            testimg = blur.copy()
            t_plus_copy = t_plus.copy()
            # filter corners
            cornerdata = []
            tt = 0
            mean_corners = np.mean(corners, axis=0)
            for i in corners:
                xl, yl = i.ravel()
                ## threshold important -> make accessible
                # filter noise to only get dart arrow:
                # drop corners too far from the corner centroid
                if abs(mean_corners[0][0] - xl) > 180:
                    cornerdata.append(tt)
                if abs(mean_corners[0][1] - yl) > 120:
                    cornerdata.append(tt)
                tt += 1
            corners_new = np.delete(corners, [cornerdata], axis=0) # delete corners to form new array
            # find left and rightmost corners
            rows,cols = dimg.shape[:2]
            # Fit a robust line through the remaining corners (the dart shaft).
            [vx,vy,x,y] = cv2.fitLine(corners_new,cv.CV_DIST_HUBER, 0,0.1,0.1)
            # NOTE(review): fitLine's result rebinds x, clobbering the
            # pixel-count threshold assigned at the top of this function;
            # confirm that is intentional (it is rebound again below).
            lefty = int((-x*vy/vx) + y)
            righty = int(((cols-x)*vy/vx)+y)
            cornerdata = []
            tt = 0
            for i in corners_new:
                xl,yl = i.ravel()
                # check distance to fitted line, only draw corners within certain range
                distance = dist(0,lefty, cols-1,righty, xl,yl)
                if distance < 40: ## threshold important -> make accessible
                    cv2.circle(testimg,(xl,yl),3,255,-1)
                else: # only save corners within certain range
                    cornerdata.append(tt)
                tt += 1
            corners_final = np.delete(corners_new, [cornerdata], axis=0) # delete corners to form new array
            ret, thresh = cv2.threshold(blur, 60, 255, 0)
            ## threshold important -> make accessible
            # too much changed: probably not a dart - try again
            if cv2.countNonZero(thresh) > 15000:
                continue
            x,y,w,h = cv2.boundingRect(corners_final)
            cv2.rectangle(t_plus_copy,(x,y),(x+w,y+h),(0,255,0),1)
            breaker += 1
            # find maximum x distance to dart tip, if camera is mounted on top
            maxloc = np.argmax(corners_final, axis=0) # check max pos!!!, write image with circle??!!!
            locationofdart = corners_final[maxloc]
            try:
                # check if dart location has neighbouring corners (if not -> continue)
                cornerdata = []
                tt = 0
                for i in corners_final:
                    xl, yl = i.ravel()
                    # Manhattan distance to the candidate tip.
                    distance = abs(locationofdart.item(0) - xl) + abs(locationofdart.item(1) - yl)
                    if distance < 40: ## threshold important -> make accessible
                        tt += 1
                    else:
                        cornerdata.append(tt)
                if tt < 3:
                    corners_temp = cornerdata
                    maxloc = np.argmax(corners_temp, axis=0)
                    locationofdart = corners_temp[maxloc]
                    print "### used different location due to noise!"
                cv2.circle(t_plus_copy, (locationofdart.item(0),locationofdart.item(1)), 10,(0, 0, 0),2, 8)
                cv2.circle(t_plus_copy, (locationofdart.item(0), locationofdart.item(1)), 2, (0, 0, 0), 2, 8)
                # check for the location of the dart with the calibration
                dartloc = DartLocation(locationofdart.item(0), locationofdart.item(1))
                dartInfo = DartRegion(dartloc) #cal_image
            except:
                print "Something went wrong in finding the darts location!"
                continue
            # check for the location of the dart with the calibration
            print dartInfo.base, dartInfo.multiplier
            if breaker == 1:
                cv2.imwrite("frame2.jpg", testimg) # save dart1 frame
            elif breaker == 2:
                cv2.imwrite("frame3.jpg", testimg) # save dart2 frame
            elif breaker == 3:
                cv2.imwrite("frame4.jpg", testimg) # save dart3 frame
            # save new diff img for next dart
            t = t_plus
            finalScore += (dartInfo.base * dartInfo.multiplier)
            if DEBUG:
                loc_x = dartloc[0] #400 + dartInfo.magnitude * math.tan(dartInfo.angle * math.pi/180)
                loc_y = dartloc[1] #400 + dartInfo.magnitude * math.tan(dartInfo.angle * math.pi/180)
                cv2.circle(debug_img, (int(loc_x), int(loc_y)), 2, (0, 255, 0), 2, 8)
                cv2.circle(debug_img, (int(loc_x), int(loc_y)), 6, (0, 255, 0), 1, 8)
                string = "" + str(dartInfo.base) + "x" + str(dartInfo.multiplier)
                # add text (before clear with rectangle)
                cv2.rectangle(debug_img, (600, 700), (800, 800), (0, 0, 0), -1)
                cv2.putText(debug_img, string, (600, 750), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, 8)
                cv2.namedWindow(winName, cv2.WINDOW_NORMAL)
                cv2.namedWindow("raw", cv2.WINDOW_NORMAL)
                cv2.namedWindow("test", cv2.WINDOW_NORMAL)
                cv2.imshow(winName, debug_img)
                cv2.imshow("raw", t_plus_copy)
                cv2.imshow("test", testimg)
            else:
                cv2.imshow(winName, testimg)
            #if breaker == 3:
            #    break
        # missed dart
        elif cv2.countNonZero(thresh) < 35000:
            continue
        # if player enters zone - break loop
        elif cv2.countNonZero(thresh) > 35000:
            break
        key = cv2.waitKey(10)
        if key == 27:
            cv2.destroyWindow(winName)
            break
        count += 1
# Module-level dart state, shared with DartRegion() via `global dartInfo`.
dartInfo = dartThrow()

if __name__ == '__main__':
    print "Welcome to darts!"
    getDart()
    #getTransformation()
|
hanneshoettinger/opencv-steel-darts
|
GetDart.py
|
Python
|
gpl-3.0
| 16,538
|
from rest_framework.request import Request
from django.utils.functional import SimpleLazyObject
from django.contrib.auth.middleware import get_user
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
def get_user_jwt(request):
    """
    Resolve the requesting user from a JWT Authorization header.

    Returns a ``(user, auth)`` tuple from DRF's JWT authentication, or
    ``(None, None)`` when no token is present or authentication fails.
    """
    try:
        user_jwt = JSONWebTokenAuthentication().authenticate(Request(request))
        # print "user_jwt: ", user_jwt
        if user_jwt is not None:
            return user_jwt[0], user_jwt[1]
        else:
            return None, None
    except Exception, e:
        # NOTE(review): broad catch maps any authentication error to an
        # anonymous (None, None) result after printing to stdout.
        print "exception", e
        pass
    return None, None
class AuthenticationMiddlewareJWT(object):
    """Middleware that resolves a JWT-authenticated user per request.

    Attaches ``request.jwtuser`` (lazily evaluated) and ``request.jwtauth``
    without touching ``request.user``.
    """

    def process_request(self, request):
        # Session middleware must have run first; mirror Django's own check.
        assert hasattr(request, 'session'), "The Django authentication middleware requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."

        jwt_user, jwt_auth = get_user_jwt(request)

        # Defer user evaluation until the first attribute access.
        request.jwtuser = SimpleLazyObject(lambda: jwt_user)
        request.jwtauth = jwt_auth
|
verdverm/starterKit
|
be/django/app/middleware.py
|
Python
|
mit
| 1,208
|
###############################################################################
#
# ChartLine - A class for writing the Excel XLSX Line charts.
#
# Copyright 2013-2014, John McNamara, jmcnamara@cpan.org
#
from . import chart
class ChartLine(chart.Chart):
    """
    A class for writing the Excel XLSX Line charts.

    """

    ###########################################################################
    #
    # Public API.
    #
    ###########################################################################

    def __init__(self, options=None):
        """
        Constructor.

        """
        super(ChartLine, self).__init__()

        if options is None:
            options = {}

        # Line series carry no point markers unless explicitly requested,
        # and smoothing is permitted for this chart type.
        self.default_marker = {'type': 'none'}
        self.smooth_allowed = True

    ###########################################################################
    #
    # Private API.
    #
    ###########################################################################

    def _write_chart_type(self, args):
        # Chart-specific override of the superclass virtual method:
        # emit the c:lineChart element.
        self._write_line_chart(args)

    ###########################################################################
    #
    # XML methods.
    #
    ###########################################################################

    def _write_line_chart(self, args):
        # Write the <c:lineChart> element and all of its children.
        if args['primary_axes']:
            series = self._get_primary_axes_series()
        else:
            series = self._get_secondary_axes_series()

        if not series:
            return

        self._xml_start_tag('c:lineChart')

        # Grouping first, then one c:ser element per series.
        self._write_grouping('standard')
        for series_data in series:
            self._write_ser(series_data)

        # Optional chart decorations.
        self._write_drop_lines()
        self._write_hi_low_lines()
        self._write_up_down_bars()

        # Marker setting, then the axis ids tying the plot to its axes.
        self._write_marker_value()
        self._write_axis_ids(args)

        self._xml_end_tag('c:lineChart')

    def _write_d_pt_point(self, index, point):
        # Write an individual <c:dPt> element. Overridden from the parent
        # to wrap the point formatting in a c:marker element.
        self._xml_start_tag('c:dPt')
        self._write_idx(index)

        self._xml_start_tag('c:marker')
        self._write_sp_pr(point)
        self._xml_end_tag('c:marker')

        self._xml_end_tag('c:dPt')
|
gcca/plaft
|
backend/infraestructure/xlsxwriter/chart_line.py
|
Python
|
gpl-2.0
| 2,735
|
"""NDG XACML ElementTree based Subject Element reader
NERC DataGrid
"""
__author__ = "P J Kershaw"
__date__ = "16/03/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id$"
from ndg.xacml.core.subject import Subject
from ndg.xacml.parsers.etree.targetchildreader import TargetChildReader
class SubjectReader(TargetChildReader):
    '''ElementTree based XACML Subject element parser

    @cvar TYPE: XACML type to instantiate from parsed object
    @type TYPE: type
    '''
    # Parse into the XACML Subject target-child type.
    TYPE = Subject
|
cedadev/ndg_xacml
|
ndg/xacml/parsers/etree/subjectreader.py
|
Python
|
bsd-3-clause
| 673
|
# -*- coding: utf-8 -*-
from rest_framework.viewsets import ModelViewSet
from .models import (
UnitNestedRouterMixinUserModel as UserModel,
UnitNestedRouterMixinGroupModel as GroupModel,
UnitNestedRouterMixinPermissionModel as PermissionModel,
)
class UserViewSet(ModelViewSet):
    """CRUD endpoint over the nested-router-mixin unit-test user model."""
    model = UserModel
class GroupViewSet(ModelViewSet):
    """CRUD endpoint over the nested-router-mixin unit-test group model."""
    model = GroupModel
class PermissionViewSet(ModelViewSet):
    """CRUD endpoint over the nested-router-mixin unit-test permission model."""
    model = PermissionModel
|
lock8/drf-extensions
|
tests_app/tests/unit/routers/nested_router_mixin/views.py
|
Python
|
mit
| 445
|
from blob import uploadBlob
def uploadImage(username, blob, filename, token, secret, tags):
    """Upload *blob* via uploadBlob and report the outcome.

    Returns the single element of the result list when uploadBlob yields
    exactly one entry, otherwise the string 'success'.

    Note: *tags* is accepted for API compatibility but not used here.
    """
    result = uploadBlob(username, blob, filename, token, secret)
    if len(result) != 1:
        return 'success'
    return result[0]
def main():
    # Smoke test for uploadImage with fixed sample arguments.
    # NOTE(review): the token/secret literals below look like real OAuth
    # credentials; they should be rotated and loaded from configuration,
    # not committed in source.
    print(uploadImage('fred', 'fun', 'filename','4800385332-ZbrU1XfignI2lA3MjQu7U8KbIkTdYAdj1ArMVFR','BPSs4gwICptsGVZQc9F2EpWcw6ar1gsv4Nlnqvq5PFIdF','fun'))
main()
|
rjhunter8285/nsc-cloudproject-s22016
|
prototype/api/verify_oauth & blob/uploadImage.py
|
Python
|
apache-2.0
| 426
|
"""
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
from sfepy.base.base import *
from sfepy.base.compat import unique
from sfepy.fem.utils import compute_nodal_normals
from sfepy.fem.functions import Function
from sfepy.fem.conditions import EssentialBC
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
    """
    Expand vector of node indices to equations (DOF indices) based on
    the DOF-per-node count.

    DOF names must be already canonized.

    Parameters
    ----------
    nods : array
        The node indices.
    dof_names : list of str
        The DOF names to expand, each present in `all_dof_names`.
    all_dof_names : list of str
        All DOF names of a node; its length is the DOF-per-node count.

    Returns
    -------
    eq : array
        The equation indices, one contiguous group per DOF name.
    """
    dpn = len(all_dof_names)
    # Collect one index group per DOF and concatenate once, instead of
    # repeatedly concatenating inside the loop (quadratic behaviour).
    groups = [dpn * nods + all_dof_names.index(dof) for dof in dof_names]
    # The leading empty int32 array keeps the original dtype behaviour
    # (including the empty-`dof_names` result).
    return nm.concatenate([nm.array([], dtype=nm.int32)] + groups)
def resolve_chains(master_slave, chains):
    """
    Resolve EPBC chains - e.g. in corner nodes.

    Every chain member except the last becomes a master of the chain's
    final slave; the slave itself points back (negatively) to one master.
    """
    for chain in chains:
        masters, last = chain[:-1], chain[-1]
        master_slave[masters] = last + 1
        # Any of the masters will do for the back-reference.
        master_slave[last] = -(chain[0] + 1)
def group_chains(chain_list):
    """
    Group EPBC chains - merge overlapping (master, slave) pairs into
    connected groups.

    Parameters
    ----------
    chain_list : list of pairs
        The (master, slave) equation pairs. The list is consumed
        (emptied) in the process.

    Returns
    -------
    chains : list of lists
        The connected groups of equations.
    """
    chains = []
    while len(chain_list):
        chain = set(chain_list.pop(0))

        ii = 0
        while ii < len(chain_list):
            c1 = sorted(chain_list[ii])
            is0 = c1[0] in chain
            is1 = c1[1] in chain

            if is0 and is1:
                # Both ends already in the group - just drop the pair.
                chain_list.pop(ii)
            elif is0 or is1:
                # Overlap - merge and restart the scan, since the grown
                # group may now absorb pairs that were already skipped.
                chain.update(c1)
                chain_list.pop(ii)
                ii = 0
            else:
                ii += 1

        chains.append(list(chain))

    return chains
class DofInfo(Struct):
    """
    Global DOF information, i.e. ordering of DOFs of the state (unknown)
    variables in the global state vector.
    """

    def __init__(self, name):
        Struct.__init__(self, name=name)

        # Number of variables appended so far.
        self.n_var = 0
        # Variable names in append order.
        self.var_names = []
        # Per-variable DOF counts.
        self.n_dof = {}
        # Cumulative DOF offsets; ptr[i]:ptr[i+1] is the i-th variable's span.
        self.ptr = [0]
        # Per-variable slice into the global state vector.
        self.indx = {}
        # Per-variable extra DOF details (or None).
        self.details = {}

    def _update_after_append(self, name):
        # Extend the offsets and the slice map for the newly added variable.
        self.ptr.append(self.ptr[-1] + self.n_dof[name])

        ii = self.n_var
        self.indx[name] = slice(int(self.ptr[ii]), int(self.ptr[ii+1]))
        self.n_var += 1

    def append_variable(self, var, active=False):
        """
        Append DOFs of the given variable.

        Parameters
        ----------
        var : Variable instance
            The variable to append.
        active : bool, optional
            When True, only active (non-constrained) DOFs are considered.
        """
        name = var.name
        if name in self.var_names:
            raise ValueError('variable %s already present!' % name)

        self.var_names.append(name)

        self.n_dof[name], self.details[name] = var.get_dof_info(active=active)
        self._update_after_append(name)

    def append_raw(self, name, n_dof):
        """
        Append raw DOFs.

        Parameters
        ----------
        name : str
            The name of variable the DOFs correspond to.
        n_dof : int
            The number of DOFs.
        """
        if name in self.var_names:
            raise ValueError('variable %s already present!' % name)

        self.var_names.append(name)

        self.n_dof[name], self.details[name] = n_dof, None
        self._update_after_append(name)

    def update(self, name, n_dof):
        """
        Set the number of DOFs of the given variable.

        Parameters
        ----------
        name : str
            The name of variable the DOFs correspond to.
        n_dof : int
            The number of DOFs.
        """
        if not name in self.var_names:
            raise ValueError('variable %s is not present!' % name)

        ii = self.var_names.index(name)
        # NOTE(review): this in-place add on a slice works only if
        # self.ptr is a numpy array; with the plain list built in
        # __init__() it would raise TypeError - confirm intended usage.
        self.ptr[ii+1:] += n_dof - self.n_dof[name]

        self.n_dof[name] = n_dof

        # Shift the slices of this and all following variables.
        for iv, nn in enumerate(self.var_names[ii:]):
            self.indx[nn] = slice(self.ptr[ii+iv], self.ptr[ii+iv+1])

    def get_info(self, var_name):
        """
        Return information on DOFs of the given variable.

        Parameters
        ----------
        var_name : str
            The name of the variable.
        """
        return Struct(name = '%s_dof_info' % var_name,
                      var_name = var_name,
                      n_dof = self.n_dof[var_name],
                      indx = self.indx[var_name],
                      details = self.details[var_name])

    def get_subset_info(self, var_names):
        """
        Return global DOF information for selected variables
        only. Silently ignores non-existing variable names.

        Parameters
        ----------
        var_names : list
            The names of the selected variables.
        """
        di = DofInfo(self.name + ':subset')
        for var_name in var_names:
            if var_name not in self.var_names:
                continue

            di.append_raw(var_name, self.n_dof[var_name])

        return di
class EquationMap(Struct):
    """
    Map all DOFs to equations for active DOFs.
    """

    def __init__(self, name, dof_names, var_di):
        Struct.__init__(self, name=name, dof_names=dof_names, var_di=var_di)

        # DOFs per node.
        self.dpn = len(self.dof_names)
        # Identity map until map_equations() is called.
        self.eq = nm.arange(var_di.n_dof, dtype=nm.int32)

    def _init_empty(self):
        # No boundary conditions: all DOFs are active, nothing is
        # constrained (EBC) or tied (EPBC master/slave).
        self.eqi = nm.arange(self.var_di.n_dof, dtype=nm.int32)
        self.eq_ebc = nm.empty((0,), dtype=nm.int32)

        self.n_eq = self.eqi.shape[0]
        self.n_ebc = self.eq_ebc.shape[0]

        self.master = nm.empty((0,), dtype=nm.int32)
        self.slave = nm.empty((0,), dtype=nm.int32)

    def map_equations(self, bcs, field, ts, functions, warn=False):
        """
        Create the mapping of active DOFs from/to all DOFs.

        Parameters
        ----------
        bcs : Conditions instance
            The Dirichlet or periodic boundary conditions (single
            condition instances). The dof names in the conditions must
            already be canonized.
        field : Field instance
            The field of the variable holding the DOFs.
        ts : TimeStepper instance
            The time stepper.
        functions : Functions instance
            The registered functions.
        warn : bool, optional
            If True, warn about BC on non-existent nodes.

        Notes
        -----
        - Periodic bc: master and slave DOFs must belong to the same
          field (variables can differ, though).
        """
        if bcs is None:
            self.val_ebc = nm.empty((0,), dtype=field.dtype)
            self._init_empty()
            return

        # Per-DOF flags/values gathered over all boundary conditions:
        # eq_ebc - 1 where constrained, val_ebc - the EBC value,
        # master_slave - signed EPBC links (master: slave+1, slave: -master-1).
        eq_ebc = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
        val_ebc = nm.zeros((self.var_di.n_dof,), dtype=field.dtype)
        master_slave = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
        chains = []

        for bc in bcs:
            if isinstance(bc, EssentialBC):
                ntype = 'EBC'
                region = bc.region

            else:
                ntype = 'EPBC'
                region = bc.regions[0]

            ## print ir, key, bc
            ## debug()

            if warn:
                clean_msg = ('warning: ignoring nonexistent' \
                             ' %s node (%s) in ' % (ntype, self.var_di.var_name))

            else:
                clean_msg = None

            # Get master region nodes.
            master_nod_list = region.get_field_nodes(field, clean=True,
                                                     warn=clean_msg)
            if len(master_nod_list) == 0:
                continue

            if ntype == 'EBC': # EBC.
                dofs, val = bc.dofs
                ##
                # Evaluate EBC values.
                nods = nm.unique(nm.hstack(master_nod_list))
                coor = field.get_coor(nods)

                # The value may be a registered function name, a Function,
                # or a constant replicated over all nodes and DOFs.
                if type(val) == str:
                    fun = functions[val]
                    vv = fun(ts, coor, bc=bc)

                elif isinstance(val, Function):
                    vv = val(ts, coor, bc=bc)

                else:
                    vv = nm.repeat([val], nods.shape[0] * len(dofs))

                eq = expand_nodes_to_equations(nods, dofs, self.dof_names)

                # Duplicates removed here...
                eq_ebc[eq] = 1
                if vv is not None: val_ebc[eq] = vv

            else: # EPBC.
                region = bc.regions[1]
                slave_nod_list = region.get_field_nodes(field, clean=True,
                                                        warn=clean_msg)
                ## print master_nod_list
                ## print slave_nod_list

                nmaster = nm.unique(nm.hstack(master_nod_list))
                # Treat fields not covering the whole domain.
                if nmaster[0] == -1:
                    nmaster = nmaster[1:]

                nslave = nm.unique(nm.hstack(slave_nod_list))
                # Treat fields not covering the whole domain.
                if nslave[0] == -1:
                    nslave = nslave[1:]

                ## print nmaster + 1
                ## print nslave + 1

                if nmaster.shape != nslave.shape:
                    msg = 'EPBC list lengths do not match!\n(%s,\n %s)' %\
                          (nmaster, nslave)
                    raise ValueError(msg)

                if (nmaster.shape[0] == 0) and (nslave.shape[0] == 0):
                    continue

                # Match master and slave nodes via the user-given function.
                mcoor = field.get_coor(nmaster)
                scoor = field.get_coor(nslave)

                fun = functions[bc.match]
                i1, i2 = fun(mcoor, scoor)

                ## print nm.c_[mcoor[i1], scoor[i2]]
                ## print nm.c_[nmaster[i1], nslave[i2]] + 1

                meq = expand_nodes_to_equations(nmaster[i1], bc.dofs[0],
                                                self.dof_names)
                seq = expand_nodes_to_equations(nslave[i2], bc.dofs[1],
                                                self.dof_names)

                m_assigned = nm.where(master_slave[meq] != 0)[0]
                s_assigned = nm.where(master_slave[seq] != 0)[0]
                if m_assigned.size or s_assigned.size: # Chain EPBC.
                    # Some DOFs are already linked by a previous EPBC -
                    # record equivalence pairs to be grouped and resolved
                    # after the loop.
                    ## print m_assigned, meq[m_assigned]
                    ## print s_assigned, seq[s_assigned]

                    aux = master_slave[meq[m_assigned]]
                    sgn = nm.sign(aux)
                    om_chain = zip(meq[m_assigned], (aux - sgn) * sgn)
                    ## print om_chain
                    chains.extend(om_chain)

                    aux = master_slave[seq[s_assigned]]
                    sgn = nm.sign(aux)
                    os_chain = zip(seq[s_assigned], (aux - sgn) * sgn)
                    ## print os_chain
                    chains.extend(os_chain)

                    m_chain = zip(meq[m_assigned], seq[m_assigned])
                    ## print m_chain
                    chains.extend(m_chain)

                    msd = nm.setdiff1d(s_assigned, m_assigned)
                    s_chain = zip(meq[msd], seq[msd])
                    ## print s_chain
                    chains.extend(s_chain)

                    msa = nm.union1d(m_assigned, s_assigned)
                    ii = nm.setdiff1d(nm.arange(meq.size), msa)
                    master_slave[meq[ii]] = seq[ii] + 1
                    master_slave[seq[ii]] = - meq[ii] - 1

                else:
                    master_slave[meq] = seq + 1
                    master_slave[seq] = - meq - 1
                ## print 'ms', master_slave
                ## print chains

        ## print master_slave
        # Collapse transitive EPBC links (e.g. corner nodes) into single
        # master/slave relations.
        chains = group_chains(chains)
        resolve_chains(master_slave, chains)

        ii = nm.argwhere(eq_ebc == 1)
        self.eq_ebc = nm.atleast_1d(ii.squeeze())
        self.val_ebc = nm.atleast_1d(val_ebc[ii].squeeze())
        self.master = nm.argwhere(master_slave > 0).squeeze()
        self.slave = master_slave[self.master] - 1

        assert_((self.eq_ebc.shape == self.val_ebc.shape))
        ## print self.eq_ebc.shape
        ## pause()
        # Build the final map: -2 marks EBC-constrained DOFs, -1 marks
        # EPBC masters; the remaining (active) DOFs get consecutive
        # equation numbers, and masters share their slave's equation.
        self.eq[self.eq_ebc] = -2
        self.eq[self.master] = -1
        self.eqi = nm.compress(self.eq >= 0, self.eq)
        self.eq[self.eqi] = nm.arange(self.eqi.shape[0], dtype=nm.int32)
        self.eq[self.master] = self.eq[self.slave]
        self.n_eq = self.eqi.shape[0]
        self.n_ebc = self.eq_ebc.shape[0]
        self.n_epbc = self.master.shape[0]
class LCBCOperator(Struct):
    """
    Base class for LCBC operators.
    """

    def treat_pbcs(self, master_equations):
        """
        Treat dofs with periodic BC.

        Keep only the transformation-matrix rows corresponding to unique
        master equations, preserving their original row order.
        """
        dummy, keep = unique(master_equations, return_index=True)
        keep.sort()
        self.mtx = self.mtx[keep]
class RigidOperator(LCBCOperator):
    """
    Transformation matrix operator for rigid LCBCs.

    Maps the rigid-body motion DOFs (rotation + translation) onto the
    nodal displacement DOFs of the given nodes.
    """

    def __init__(self, name, nodes, field, dof_names, all_dof_names):
        Struct.__init__(self, name=name, nodes=nodes, dof_names=dof_names)

        coors = field.get_coor(nodes)
        n_nod, dim = coors.shape

        # Translation part: identity per node.
        mtx_e = nm.tile(nm.eye(dim, dtype=nm.float64), (n_nod, 1))

        if dim == 2:
            # Single in-plane rotation DOF: u = (-y, x) * phi.
            mtx_r = nm.empty((dim * n_nod, 1), dtype=nm.float64)
            mtx_r[0::dim,0] = -coors[:,1]
            mtx_r[1::dim,0] = coors[:,0]
            n_rigid_dof = 3

        elif dim == 3:
            # Three rotation DOFs: u = r x phi (cross-product matrix rows).
            mtx_r = nm.zeros((dim * n_nod, dim), dtype=nm.float64)
            mtx_r[0::dim,1] = coors[:,2]
            mtx_r[0::dim,2] = -coors[:,1]
            mtx_r[1::dim,0] = -coors[:,2]
            mtx_r[1::dim,2] = coors[:,0]
            mtx_r[2::dim,0] = coors[:,1]
            mtx_r[2::dim,1] = -coors[:,0]
            n_rigid_dof = 6

        else:
            msg = 'dimension in [2, 3]: %d' % dim
            raise ValueError(msg)

        self.n_dof = n_rigid_dof
        self.mtx = nm.hstack((mtx_r, mtx_e))

        # Strip unconstrained dofs.
        aux = dim * nm.arange(n_nod)
        indx = [aux + all_dof_names.index(dof) for dof in dof_names]
        indx = nm.array(indx).T.ravel()

        self.mtx = self.mtx[indx]
def _save_normals(filename, normals, region, mesh):
    # Debug helper: store the per-vertex normals as vertex data in a mesh
    # file; vertices outside the region keep zero normals.
    nn = nm.zeros_like(mesh.coors)
    nmax = region.all_vertices.shape[0]
    nn[region.all_vertices] = normals[:nmax]
    out = {'normals' : Struct(name = 'output_data',
                              mode = 'vertex', data = nn)}
    mesh.write(filename, out=out, io='auto')
class NoPenetrationOperator(LCBCOperator):
    """
    Transformation matrix operator for no-penetration LCBCs.

    Per node, the displacement component along the dominant normal axis
    is expressed through the remaining (tangential) components, removing
    one DOF per node.
    """
    def __init__(self, name, nodes, region, field, dof_names, filename=None):
        Struct.__init__(self, name=name, nodes=nodes, dof_names=dof_names)

        dim = field.shape[0]
        assert_(len(dof_names) == dim)

        normals = compute_nodal_normals(nodes, region, field)

        if filename is not None:
            # Optional debug output of the computed normals.
            _save_normals(filename, normals, region, field.domain.mesh)

        # Dominant normal component per node - the DOF eliminated there.
        ii = nm.abs(normals).argmax(1)
        n_nod, dim = normals.shape

        irs = set(range(dim))

        data = []
        rows = []
        cols = []
        # Build the sparse transformation per dominant axis group.
        for idim in xrange(dim):
            ic = nm.where(ii == idim)[0]
            if len(ic) == 0: continue
            ## print ic
            ## print idim

            ir = list(irs.difference([idim]))
            nn = nm.empty((len(ic), dim - 1), dtype=nm.float64)
            for ik, il in enumerate(ir):
                # Coefficients expressing the normal component through the
                # tangential ones: -n_il / n_idim.
                nn[:,ik] = - normals[ic,il] / normals[ic,idim]

            irn = dim * ic + idim
            ics = [(dim - 1) * ic + ik for ik in xrange(dim - 1)]
            for ik in xrange(dim - 1):
                rows.append(irn)
                cols.append(ics[ik])
                data.append(nn[:,ik])

            # Tangential components map to the new DOFs with unit weight.
            ones = nm.ones( (nn.shape[0],), dtype = nm.float64 )
            for ik, il in enumerate(ir):
                rows.append(dim * ic + il)
                cols.append(ics[ik])
                data.append(ones)
            ## print rows
            ## print cols
            ## print data

        rows = nm.concatenate(rows)
        cols = nm.concatenate(cols)
        data = nm.concatenate(data)

        n_np_dof = n_nod * (dim - 1)
        mtx = sp.coo_matrix((data, (rows, cols)), shape=(n_nod * dim, n_np_dof))

        self.n_dof = n_np_dof
        self.mtx = mtx.tocsr()

        ## import pylab
        ## from sfepy.base.plotutils import spy
        ## spy( mtx )
        ## print mtx
        ## pylab.show()
class NormalDirectionOperator(LCBCOperator):
    """
    Transformation matrix operator for normal direction LCBCs.

    The substitution (in 3D) is:

    .. math::
        [u_1, u_2, u_3]^T = [n_1, n_2, n_3]^T w

    The new DOF is :math:`w`.
    """
    def __init__(self, name, nodes, region, field, dof_names, filename=None):
        Struct.__init__(self, name=name, nodes=nodes, dof_names=dof_names)

        dim = field.shape[0]
        assert_(len(dof_names) == dim)

        normals = compute_nodal_normals(nodes, region, field)

        if filename is not None:
            # Optional debug output of the computed normals.
            _save_normals(filename, normals, region, field.domain.mesh)

        n_nod, dim = normals.shape

        # Each node couples its `dim` displacement DOFs (rows) to a
        # single new DOF w (column) through its normal components.
        data = normals.ravel()
        rows = nm.arange(data.shape[0])
        cols = nm.repeat(nm.arange(n_nod), dim)

        mtx = sp.coo_matrix((data, (rows, cols)), shape=(n_nod * dim, n_nod))

        self.n_dof = n_nod
        self.mtx = mtx.tocsr()
class LCBCOperators(Container):
    """
    Container holding instances of LCBCOperator subclasses for a single
    variable.

    Parameters
    ----------
    name : str
        The object name.
    eq_map : EquationMap instance
        The equation mapping of the variable.
    offset : int
        The offset added to markers distinguishing the individual LCBCs.
    """
    def __init__(self, name, eq_map, offset):
        Container.__init__(self, name=name, eq_map=eq_map, offset=offset)

        # Marker per active equation of the variable; 0 = unconstrained.
        self.eq_lcbc = nm.zeros((self.eq_map.n_eq,), dtype=nm.int32)

        self.markers = []
        self.n_transformed_dof = []
        self.n_op = 0
        self.ics = None

    def add_from_bc(self, bc, field):
        """
        Create a new LCBC operator described by `bc`, and add it to the
        container.

        Parameters
        ----------
        bc : LinearCombinationBC instance
            The LCBC condition description.
        field : Field instance
            The field of the variable.
        """
        region = bc.region
        dofs, kind = bc.dofs

        nmaster = region.get_field_nodes(field, merge=True)

        if kind == 'rigid':
            op = RigidOperator('%d_rigid' % len(self),
                               nmaster, field, dofs, self.eq_map.dof_names)

        elif kind == 'no_penetration':
            filename = get_default_attr(bc, 'filename', None)
            op = NoPenetrationOperator('%d_no_penetration' % len(self),
                                       nmaster, region, field, dofs,
                                       filename=filename)

        elif kind == 'normal_direction':
            filename = get_default_attr(bc, 'filename', None)
            op = NormalDirectionOperator('%d_normal_direction' % len(self),
                                         nmaster, region, field, dofs,
                                         filename=filename)

        # NOTE(review): an unrecognized `kind` leaves `op` unbound, so the
        # call below raises UnboundLocalError - confirm kinds are validated
        # upstream.
        self.append(op)

    def append(self, op):
        Container.append(self, op)

        # Mark the operator's equations with a fresh marker.
        eq = self.eq_map.eq
        meq = eq[expand_nodes_to_equations(op.nodes, op.dof_names,
                                           self.eq_map.dof_names)]
        assert_(nm.all(meq >= 0))

        op.treat_pbcs(meq)

        self.markers.append(self.offset + self.n_op + 1)
        self.eq_lcbc[meq] = self.markers[-1]

        self.n_transformed_dof.append(op.n_dof)
        self.n_op = len(self)

    def finalize(self):
        """
        Call this after all LCBCs of the variable have been added.
        Initializes the global column indices.
        """
        self.ics = nm.cumsum(nm.r_[0, self.n_transformed_dof])
def make_global_lcbc_operator(lcbc_ops, adi, new_only=False):
    """
    Assemble all LCBC operators into a single matrix.

    Parameters
    ----------
    lcbc_ops : dict
        The LCBCOperators instance (or None) per variable name.
    adi : DofInfo
        The active DOF information.
    new_only : bool
        If True, the operator columns will contain only new DOFs.

    Returns
    -------
    mtx_lc : csr_matrix
        The global LCBC operator in the form of a CSR matrix, or None
        (with None `lcdi`) when no LCBCs are present.
    lcdi : DofInfo
        The global active LCBC-constrained DOF information.
    """
    n_dof = adi.ptr[-1]
    eq_lcbc = nm.zeros((n_dof,), dtype=nm.int32)

    # Gather per-variable markers and count free/new DOFs.
    n_dof_new = 0
    n_free = {}
    n_new = {}
    for var_name, lcbc_op in lcbc_ops.iteritems():
        ## print var_name, lcbc_op
        if lcbc_op is None: continue

        indx = adi.indx[var_name]
        eq_lcbc[indx] = lcbc_op.eq_lcbc

        n_free[var_name] = len(nm.where(lcbc_op.eq_lcbc == 0)[0])
        n_new[var_name] = nm.sum(lcbc_op.n_transformed_dof)

        n_dof_new += n_new[var_name]

    if n_dof_new == 0:
        return None, None

    ii = nm.nonzero( eq_lcbc )[0]
    n_constrained = ii.shape[0]
    n_dof_free = n_dof - n_constrained
    n_dof_reduced = n_dof_free + n_dof_new
    output( 'dofs: total %d, free %d, constrained %d, new %d'\
            % (n_dof, n_dof_free, n_constrained, n_dof_new) )
    output( ' -> reduced %d' % (n_dof_reduced) )

    # DOF bookkeeping of the reduced system: free, new, and combined.
    lcdi = DofInfo('lcbc_active_state_dof_info')
    fdi = DofInfo('free_dof_info')
    ndi = DofInfo('new_dof_info')
    for var_name in adi.var_names:
        nf = n_free.get(var_name, adi.n_dof[var_name])
        nn = n_new.get(var_name, 0)
        fdi.append_raw(var_name, nf)
        ndi.append_raw(var_name, nn)
        lcdi.append_raw(var_name, nn + nf)

    assert_(lcdi.ptr[-1] == n_dof_reduced)

    # Scatter each operator's transformation matrix into the global one.
    rows = []
    cols = []
    data = []
    for var_name, lcbc_op in lcbc_ops.iteritems():
        if lcbc_op is None: continue

        if new_only:
            offset = ndi.indx[var_name].start

        else:
            offset = lcdi.indx[var_name].start + fdi.n_dof[var_name]

        for ii, op in enumerate(lcbc_op):
            indx = nm.where(eq_lcbc == lcbc_op.markers[ii])[0]
            icols = nm.arange(offset + lcbc_op.ics[ii],
                              offset + lcbc_op.ics[ii+1])

            if isinstance(op.mtx, sp.spmatrix):
                lr, lc, lv = sp.find(op.mtx)
                rows.append(indx[lr])
                cols.append(icols[lc])
                data.append(lv)

            else:
                irs, ics = nm.meshgrid(indx, icols)
                rows.append(irs.ravel())
                cols.append(ics.ravel())
                data.append(op.mtx.T.ravel())

    rows = nm.concatenate(rows)
    cols = nm.concatenate(cols)
    data = nm.concatenate(data)

    if new_only:
        mtx_lc = sp.coo_matrix((data, (rows, cols)),
                               shape=(n_dof, n_dof_new))

    else:
        mtx_lc = sp.coo_matrix((data, (rows, cols)),
                               shape=(n_dof, n_dof_reduced))

        # Identity part mapping the unconstrained DOFs onto themselves.
        ir = nm.where( eq_lcbc == 0 )[0]
        ic = nm.empty((n_dof_free,), dtype=nm.int32)
        for var_name in adi.var_names:
            ii = nm.arange(fdi.n_dof[var_name], dtype=nm.int32)
            ic[fdi.indx[var_name]] = lcdi.indx[var_name].start + ii

        mtx_lc2 = sp.coo_matrix((nm.ones((ir.shape[0],)), (ir, ic)),
                                shape=(n_dof, n_dof_reduced), dtype=nm.float64)

        mtx_lc = mtx_lc + mtx_lc2

    mtx_lc = mtx_lc.tocsr()
    ## import pylab
    ## from sfepy.base.plotutils import spy
    ## spy( mtx_lc )
    ## print mtx_lc
    ## pylab.show()

    return mtx_lc, lcdi
|
olivierverdier/sfepy
|
sfepy/fem/dof_info.py
|
Python
|
bsd-3-clause
| 23,999
|
# -*- coding: utf-8 -*-
"""
Runs functions in pipeline to get query reuslts and does some caching.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ubelt as ub
import utool as ut
from os.path import exists
from ibeis.algo.hots import chip_match
from ibeis.algo.hots import pipeline
(print, rrr, profile) = ut.inject2(__name__)
# TODO: Move to params
# Master switch for all hotspotter query caching; the flags below can only
# narrow it, never enable caching when it is off.
USE_HOTSPOTTER_CACHE = pipeline.USE_HOTSPOTTER_CACHE
USE_CACHE = not ut.get_argflag(('--nocache-query', '--noqcache')) and USE_HOTSPOTTER_CACHE
# NOTE(review): the next two constants gate on ``ut.USE_CACHE`` (a utool
# global) rather than the module-level USE_CACHE defined just above --
# possibly intentional, but it looks like the local constant may have been
# meant; verify.
USE_BIGCACHE = not ut.get_argflag(('--nocache-big', '--no-bigcache-query', '--noqcache', '--nobigcache')) and ut.USE_CACHE
USE_SUPERCACHE = not ut.get_argflag(('--nocache-super', '--no-supercache-query', '--noqcache', '--nosupercache')) and ut.USE_CACHE
SAVE_CACHE = not ut.get_argflag('--nocache-save')
#MIN_BIGCACHE_BUNDLE = 20
#MIN_BIGCACHE_BUNDLE = 150
# Minimum number of query annotations before the bundled "big" cache is used
# (single/small queries are cached per-annotation instead).
MIN_BIGCACHE_BUNDLE = 64
# Optional CLI override for the pipeline batch size (None -> use ibs config).
HOTS_BATCH_SIZE = ut.get_argval('--hots-batch-size', type_=int, default=None)
#----------------------
# Main Query Logic
#----------------------
@profile
def submit_query_request(qreq_, use_cache=None, use_bigcache=None,
                         verbose=None, save_qcache=None, use_supercache=None,
                         invalidate_supercache=None):
    """
    Called from qreq_.execute
    Checks a big cache for qaid2_cm. If cache miss, tries to load each cm
    individually. On an individual cache miss, it performs the query.

    Args:
        qreq_: prebuilt query request (must not be None).
        use_cache (bool or None): per-annotation cache; defaults to USE_CACHE.
        use_bigcache (bool or None): bundled cache; defaults to USE_BIGCACHE.
        verbose (bool or None): defaults to pipeline.VERB_PIPELINE.
        save_qcache (bool or None): defaults to SAVE_CACHE.
        use_supercache (bool or None): defaults to USE_SUPERCACHE.
        invalidate_supercache (bool or None): forwarded to
            execute_query_and_save_L1.

    Returns:
        list: cm_list -- one ChipMatch (or None on impossible requests)
        per qaid in qreq_.qaids, in the same order.
    CommandLine:
        python -m ibeis.algo.hots.match_chips4 --test-submit_query_request
    Examples:
        >>> # SLOW_DOCTEST
        >>> # xdoctest: +SKIP
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> qaid_list = [1]
        >>> daid_list = [1, 2, 3, 4, 5]
        >>> use_bigcache = True
        >>> use_cache = True
        >>> ibs = ibeis.opendb(db='testdb1')
        >>> qreq_ = ibs.new_query_request(qaid_list, daid_list, verbose=True)
        >>> cm_list = submit_query_request(qreq_=qreq_)
    """
    # Get flag defaults if necessary
    if verbose is None:
        verbose = pipeline.VERB_PIPELINE
    if use_cache is None:
        use_cache = USE_CACHE
    if save_qcache is None:
        save_qcache = SAVE_CACHE
    if use_bigcache is None:
        use_bigcache = USE_BIGCACHE
    if use_supercache is None:
        use_supercache = USE_SUPERCACHE
    # Create new query request object to store temporary state
    if verbose:
        #print('[mc4] --- Submit QueryRequest_ --- ')
        print(ub.color_text('[mc4] --- Submit QueryRequest_ --- ', 'darkyellow'))
    assert qreq_ is not None, 'query request must be prebuilt'
    # Check for empty queries
    try:
        assert len(qreq_.daids) > 0, 'there are no database chips'
        assert len(qreq_.qaids) > 0, 'there are no query chips'
    except AssertionError as ex:
        ut.printex(ex, 'Impossible query request', iswarning=True,
                   keys=['qreq_.qaids', 'qreq_.daids'])
        if ut.SUPER_STRICT:
            raise
        # Degrade gracefully: an empty/invalid request yields all-None matches.
        cm_list = [None for qaid in qreq_.qaids]
    else:
        # --- BIG CACHE ---
        # Do not use bigcache single queries
        is_big = len(qreq_.qaids) > MIN_BIGCACHE_BUNDLE
        use_bigcache_ = (use_bigcache and use_cache and is_big)
        if (use_bigcache_ or save_qcache):
            # cacher is needed either to load (big hit) or to save later.
            cacher = qreq_.get_big_cacher()
            if use_bigcache_:
                try:
                    qaid2_cm = cacher.load()
                    cm_list = [qaid2_cm[qaid] for qaid in qreq_.qaids]
                except (IOError, AttributeError):
                    # Cache miss or stale/corrupt cache -> fall through and
                    # recompute below.
                    pass
                else:
                    return cm_list
        # ------------
        # Execute query request
        qaid2_cm = execute_query_and_save_L1(qreq_, use_cache, save_qcache,
                                             verbose=verbose,
                                             use_supercache=use_supercache,
                                             invalidate_supercache=invalidate_supercache)
        # ------------
        if save_qcache and is_big:
            cacher.save(qaid2_cm)
        cm_list = [qaid2_cm[qaid] for qaid in qreq_.qaids]
    return cm_list
@profile
def execute_query_and_save_L1(qreq_, use_cache, save_qcache, verbose=True,
                              batch_size=None, use_supercache=False,
                              invalidate_supercache=False):
    """
    Loads per-annotation cached ChipMatches where possible, computes the
    rest via execute_query2, and merges the two result sets.

    Args:
        qreq_ (ibeis.QueryRequest):
        use_cache (bool): try to load individual cached ChipMatch files.
        save_qcache (bool): save newly computed ChipMatch files.
        batch_size (int or None): forwarded to execute_query2.
        use_supercache (bool): use the supercache fpaths instead.
        invalidate_supercache (bool): delete existing supercache files first.
    Returns:
        dict: qaid2_cm mapping each qaid to its ChipMatch.
    CommandLine:
        python -m ibeis.algo.hots.match_chips4 execute_query_and_save_L1:0
        python -m ibeis.algo.hots.match_chips4 execute_query_and_save_L1:1
        python -m ibeis.algo.hots.match_chips4 execute_query_and_save_L1:2
        python -m ibeis.algo.hots.match_chips4 execute_query_and_save_L1:3
    Example0:
        >>> # SLOW_DOCTEST
        >>> # xdoctest: +SKIP
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> use_cache, save_qcache, verbose = False, False, True
        >>> qaid2_cm = execute_query_and_save_L1(qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example1:
        >>> # SLOW_DOCTEST
        >>> # xdoctest: +SKIP
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> cfgdict1 = dict(codename='vsone', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> use_cache, save_qcache, verbose = False, False, True
        >>> qaid2_cm = execute_query_and_save_L1(qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example1:
        >>> # SLOW_DOCTEST
        >>> # xdoctest: +SKIP
        >>> # TEST SAVE
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> use_cache, save_qcache, verbose = False, True, True
        >>> qaid2_cm = execute_query_and_save_L1(qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example2:
        >>> # SLOW_DOCTEST
        >>> # xdoctest: +SKIP
        >>> # TEST LOAD
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=True)
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
        >>> use_cache, save_qcache, verbose = True, True, True
        >>> qaid2_cm = execute_query_and_save_L1(qreq_, use_cache, save_qcache, verbose)
        >>> print(qaid2_cm)
    Example2:
        >>> # ENABLE_DOCTEST
        >>> # TEST PARTIAL HIT
        >>> from ibeis.algo.hots.match_chips4 import *  # NOQA
        >>> import ibeis
        >>> cfgdict1 = dict(codename='vsmany', sv_on=False, prescore_method='csum')
        >>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3,
        >>>                                                               4, 5, 6,
        >>>                                                               7, 8, 9])
        >>> use_cache, save_qcache, verbose = False, True, False
        >>> qaid2_cm = execute_query_and_save_L1(qreq_, use_cache,
        >>>                                      save_qcache, verbose,
        >>>                                      batch_size=3)
        >>> cm = qaid2_cm[1]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[4]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[5]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> cm = qaid2_cm[6]
        >>> ut.delete(cm.get_fpath(qreq_))
        >>> print('Re-execute')
        >>> qaid2_cm_ = execute_query_and_save_L1(qreq_, use_cache,
        >>>                                       save_qcache, verbose,
        >>>                                       batch_size=3)
        >>> assert all([qaid2_cm_[qaid] == qaid2_cm[qaid] for qaid in qreq_.qaids])
        >>> [ut.delete(fpath) for fpath in qreq_.get_chipmatch_fpaths(qreq_.qaids)]
    Ignore:
        other = cm_ = qaid2_cm_[qaid]
        cm = qaid2_cm[qaid]
    """
    if invalidate_supercache:
        # Wipe every existing supercache file for this query dir up front.
        dpath = qreq_.get_qresdir()
        fpath_list = ut.glob('%s/*_cm_supercache_*' % (dpath, ))
        for fpath in fpath_list:
            ut.delete(fpath)
    if use_cache:
        if verbose:
            print('[mc4] cache-query is on')
            if use_supercache:
                print('[mc4] supercache-query is on')
        # Try loading as many cached results as possible
        qaid2_cm_hit = {}
        external_qaids = qreq_.qaids
        fpath_list = list(qreq_.get_chipmatch_fpaths(external_qaids, super_qres_cache=use_supercache))
        exists_flags = [exists(fpath) for fpath in fpath_list]
        qaids_hit = ut.compress(external_qaids, exists_flags)
        fpaths_hit = ut.compress(fpath_list, exists_flags)
        fpath_iter = ut.ProgIter(
            fpaths_hit, length=len(fpaths_hit), enabled=len(fpaths_hit) > 1,
            label='loading cache hits', adjust=True, freq=1)
        try:
            # Optimistic pass: assume every on-disk ChipMatch loads cleanly.
            cm_hit_list = [
                chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
                for fpath in fpath_iter
            ]
            assert all([qaid == cm.qaid for qaid, cm in zip(qaids_hit, cm_hit_list)]), (
                'inconsistent qaid and cm.qaid')
            qaid2_cm_hit = {cm.qaid: cm for cm in cm_hit_list}
        except chip_match.NeedRecomputeError:
            # Some cached file is stale; re-scan one-by-one and keep only
            # the loads that succeed.
            print('NeedRecomputeError: Some cached chips need to recompute')
            fpath_iter = ut.ProgIter(
                fpaths_hit, length=len(fpaths_hit), enabled=len(fpaths_hit) > 1,
                label='checking chipmatch cache', adjust=True, freq=1)
            # Recompute those that fail loading
            qaid2_cm_hit = {}
            for fpath in fpath_iter:
                try:
                    cm = chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
                except chip_match.NeedRecomputeError:
                    pass
                else:
                    qaid2_cm_hit[cm.qaid] = cm
            print('%d / %d cached matches need to be recomputed' % (
                len(qaids_hit) - len(qaid2_cm_hit), len(qaids_hit)))
        if len(qaid2_cm_hit) == len(external_qaids):
            # Full cache hit: nothing left to compute.
            return qaid2_cm_hit
        else:
            if len(qaid2_cm_hit) > 0 and not ut.QUIET:
                print('... partial cm cache hit %d/%d' % (
                    len(qaid2_cm_hit), len(external_qaids)))
            cachehit_qaids = list(qaid2_cm_hit.keys())
            # mask queries that have already been executed
            qreq_.set_external_qaid_mask(cachehit_qaids)
    else:
        if ut.VERBOSE:
            print('[mc4] cache-query is off')
        qaid2_cm_hit = {}
    qaid2_cm = execute_query2(qreq_, verbose, save_qcache, batch_size, use_supercache)
    # Merge cache hits with computed misses
    if len(qaid2_cm_hit) > 0:
        qaid2_cm.update(qaid2_cm_hit)
    qreq_.set_external_qaid_mask(None)  # undo state changes
    return qaid2_cm
@profile
def execute_query2(qreq_, verbose, save_qcache, batch_size=None, use_supercache=False):
    """
    Breaks up query request into several subrequests
    to process "more efficiently" and safer as well.

    Args:
        qreq_: query request; its qaids are chunked into shallow sub-requests.
        verbose (bool): forwarded to preload and the L0 pipeline.
        save_qcache (bool): write each computed ChipMatch to disk.
        batch_size (int or None): chunk size override; falls back to the
            --hots-batch-size flag, then the ibs config.
        use_supercache (bool): save to the supercache fpaths instead.
    Returns:
        dict: qaid2_cm mapping each qaid to its computed ChipMatch.
    """
    if qreq_.prog_hook is not None:
        # Split the progress bar: a small slice for preloading, the rest
        # for the actual query chunks.
        preload_hook, query_hook = qreq_.prog_hook.subdivide(spacing=[0, .15, .8])
        preload_hook(0, lbl='preloading')
        qreq_.prog_hook = query_hook
    else:
        preload_hook = None
    # Load features / weights for all annotations
    qreq_.lazy_preload(prog_hook=preload_hook, verbose=verbose and ut.NOT_QUIET)
    all_qaids = qreq_.qaids
    print('len(missed_qaids) = %r' % (len(all_qaids),))
    qaid2_cm = {}
    # vsone must have a chunksize of 1
    if batch_size is None:
        if HOTS_BATCH_SIZE is None:
            hots_batch_size = qreq_.ibs.cfg.other_cfg.hots_batch_size
            #hots_batch_size = 256
        else:
            hots_batch_size = HOTS_BATCH_SIZE
    else:
        hots_batch_size = batch_size
    chunksize = 1 if qreq_.qparams.vsone else hots_batch_size
    # Iterate over vsone queries in chunks.
    n_total_chunks = ut.get_num_chunks(len(all_qaids), chunksize)
    qaid_chunk_iter = ut.ichunks(all_qaids, chunksize)
    # Lazily produce one shallow sub-request per chunk of qaids.
    _qreq_iter = (qreq_.shallowcopy(qaids=qaids) for qaids in qaid_chunk_iter)
    sub_qreq_iter = ut.ProgIter(_qreq_iter, length=n_total_chunks, freq=1,
                                label='[mc4] query chunk: ',
                                prog_hook=qreq_.prog_hook)
    for sub_qreq_ in sub_qreq_iter:
        if ut.VERBOSE:
            print('Generating vsmany chunk')
        sub_cm_list = pipeline.request_ibeis_query_L0(qreq_.ibs, sub_qreq_,
                                                      verbose=verbose)
        # Sanity: pipeline must return exactly one cm per qaid, in order.
        assert len(sub_qreq_.qaids) == len(sub_cm_list), 'not aligned'
        assert all([qaid == cm.qaid for qaid, cm in
                    zip(sub_qreq_.qaids, sub_cm_list)]), 'not corresonding'
        if save_qcache:
            fpath_list = list(qreq_.get_chipmatch_fpaths(sub_qreq_.qaids, super_qres_cache=use_supercache))
            _iter = zip(sub_cm_list, fpath_list)
            _iter = ut.ProgIter(_iter, length=len(sub_cm_list),
                                label='saving chip matches', adjust=True, freq=1)
            for cm, fpath in _iter:
                cm.save_to_fpath(fpath, verbose=False)
        else:
            if ut.VERBOSE:
                print('[mc4] not saving vsmany chunk')
        qaid2_cm.update({cm.qaid: cm for cm in sub_cm_list})
    return qaid2_cm
if __name__ == '__main__':
    """
    python -m ibeis.algo.hots.match_chips4
    python -m ibeis.algo.hots.match_chips4 --allexamples --testslow
    """
    import multiprocessing
    # No-op except in frozen Windows builds, where it is required.
    multiprocessing.freeze_support()
    # Run this module's doctests via utool.
    ut.doctest_funcs()
|
Erotemic/ibeis
|
ibeis/algo/hots/match_chips4.py
|
Python
|
apache-2.0
| 14,348
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flag related helpers for sole tenancy related commands."""
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
# Resolver mapping the zonal scope to the 'compute.hostTypes' collection so
# scope-aware flags can resolve sole tenancy host type resource references.
SOLE_TENANCY_HOST_TYPE_RESOLVER = compute_flags.ResourceResolver.FromMap(
    'sole tenancy host type', {
        compute_scope.ScopeEnum.ZONE: 'compute.hostTypes'})
|
Sorsly/subtle
|
google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/sole_tenancy/sole_tenancy_hosts/flags.py
|
Python
|
mit
| 967
|
import config
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
FLAGS = tf.app.flags.FLAGS
class Model:
    """CNN-feature + RNN classifier graph builder (TF1 / tf.contrib.slim).

    Builds one of three convolutional front-ends (selected by FLAGS.conv:
    'inception', 'vgg16', or a small default CNN), feeds the features into
    an LSTM/GRU cell (selected by FLAGS.rnn), and ends in a 6-way dense
    layer.  The final logits tensor is exposed as ``self.logits``.
    """
    def __init__(self, inputs, is_training, keep_prob):
        # inputs: image batch tensor -- presumably NHWC; TODO confirm shape.
        # is_training: python bool / placeholder controlling batch norm,
        #   dropout and whether conv weights are trainable.
        # keep_prob: dropout keep probability.
        self.inputs = inputs
        self.is_training = is_training
        self.keep_prob = keep_prob
        self.logits = self._init_model()
    def _init_model(self):
        """Assemble conv front-end -> RNN -> dense head; return logits."""
        if FLAGS.conv == 'inception':
            print('Using Inception model')
            net = self._inception_cnn(self.inputs)
        elif FLAGS.conv == 'vgg16':
            print('Using VGG16 model')
            net = self._vgg16(self.inputs)
        else:
            print('Using common cnn block')
            net = self._cnn(self.inputs)
        rnn = self._rnn_cell(net)
        return self._dense(rnn)
    def _cnn(self, input):
        """Small default conv stack: conv/pool x3 with batch norm + dropout."""
        with slim.arg_scope([slim.conv2d], stride=1,
                            weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                            trainable=self.is_training):
            with tf.variable_scope('Convolution', [input]):
                net = slim.conv2d(input, 32, [1, 1], stride=2, scope='Conv1',
                                  normalizer_fn=slim.batch_norm,
                                  normalizer_params={'is_training': self.is_training})
                net = slim.max_pool2d(net, [3, 3], scope='Pool1', stride=1)
                net = slim.conv2d(net, 32, [3, 3], scope='Conv2')
                net = slim.dropout(net, self.keep_prob, scope='Dropout')
                net = slim.max_pool2d(net, [3, 3], scope='Pool2', stride=1)
                net = slim.conv2d(net, 32, [3, 3], stride=2, scope='Conv3')
        return net
    def _inception_cnn(self, inputs):
        """Inception-v4-style stem + one Inception-A block + one Reduction-A
        block; returns the concatenated reduction output."""
        conv1 = slim.conv2d(inputs, 32, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
        conv2 = slim.conv2d(conv1, 32, [3, 3], stride=2, padding='VALID', scope='Conv2d_2a_3x3')
        inc_inputs = slim.conv2d(conv2, 64, [3, 3], scope='Conv2d_2b_3x3')
        with slim.arg_scope([slim.conv2d], trainable=self.is_training, stride=1, padding='SAME'):
            with slim.arg_scope([slim.avg_pool2d, slim.max_pool2d], stride=1, padding='SAME'):
                # Inception-A: four parallel branches concatenated on channels.
                with tf.variable_scope('BlockInceptionA', [inc_inputs]):
                    with tf.variable_scope('IBranch_0'):
                        ibranch_0 = slim.conv2d(inc_inputs, 96, [1, 1], scope='IConv2d_0a_1x1')
                    with tf.variable_scope('IBranch_1'):
                        ibranch_1_conv1 = slim.conv2d(inc_inputs, 64, [1, 1], scope='IConv2d_0a_1x1')
                        ibranch_1 = slim.conv2d(ibranch_1_conv1, 96, [3, 3], scope='IConv2d_0b_3x3')
                    with tf.variable_scope('IBranch_2'):
                        ibranch_2_conv1 = slim.conv2d(inc_inputs, 64, [1, 1], scope='IConv2d_0a_1x1')
                        ibranch_2_conv2 = slim.conv2d(ibranch_2_conv1, 96, [3, 3], scope='IConv2d_0b_3x3')
                        ibranch_2 = slim.conv2d(ibranch_2_conv2, 96, [3, 3], scope='IConv2d_0c_3x3')
                    with tf.variable_scope('IBranch_3'):
                        ibranch_3_pool = slim.avg_pool2d(inc_inputs, [3, 3], scope='IAvgPool_0a_3x3')
                        ibranch_3 = slim.conv2d(ibranch_3_pool, 96, [1, 1], scope='IConv2d_0b_1x1')
                    inception = tf.concat(axis=3, values=[ibranch_0, ibranch_1, ibranch_2, ibranch_3])
                # Reduction-A: downsample spatially (stride 2 / VALID).
                with tf.variable_scope('BlockReductionA', [inception]):
                    with tf.variable_scope('RBranch_0'):
                        rbranch_0 = slim.conv2d(inception, 384, [3, 3], stride=2, padding='VALID',
                                                scope='RConv2d_1a_3x3')
                    with tf.variable_scope('RBranch_1'):
                        rbranch_1_conv1 = slim.conv2d(inception, 192, [1, 1], scope='RConv2d_0a_1x1')
                        rbranch_1_conv2 = slim.conv2d(rbranch_1_conv1, 224, [3, 3], scope='RConv2d_0b_3x3')
                        rbranch_1 = slim.conv2d(rbranch_1_conv2, 256, [3, 3], stride=2, padding='VALID',
                                                scope='RConv2d_1a_3x3')
                    with tf.variable_scope('RBranch_2'):
                        rbranch_2 = slim.max_pool2d(inception, [3, 3], stride=2, padding='VALID',
                                                    scope='RMaxPool_1a_3x3')
                    return tf.concat(axis=3, values=[rbranch_0, rbranch_1, rbranch_2])
    def _vgg16(self, inputs):
        """VGG-16 style stack ending in fc6/fc7/fc8.

        NOTE(review): fully_connected is applied without flattening the
        spatial dims -- slim then treats only the last axis as features,
        which differs from canonical VGG; confirm this is intended.
        """
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            trainable=self.is_training,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                            weights_regularizer=slim.l2_regularizer(0.0005)):
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            net = slim.fully_connected(net, 4096, scope='fc6')
            net = slim.dropout(net, self.keep_prob, scope='dropout6')
            net = slim.fully_connected(net, 4096, scope='fc7')
            net = slim.dropout(net, self.keep_prob, scope='dropout7')
            net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc8')
        return net
    @staticmethod
    def _rnn_cell(net):
        """Reshape conv features to (-1, FLAGS.esize, features), run an
        LSTM/GRU over the esize axis, and mean-pool the outputs over time.

        NOTE(review): zero_state is built with batch size 1 while the
        reshape uses -1 for the batch dim -- this assumes the effective
        batch is 1; confirm.
        """
        with tf.variable_scope('RNN_cell'):
            size = np.prod(net.get_shape().as_list()[1:])
            rnn_inputs = tf.reshape(net, (-1, FLAGS.esize, size))
            if FLAGS.rnn == 'LSTM':
                cell = tf.contrib.rnn.LSTMCell(100)
            else:
                cell = tf.contrib.rnn.GRUCell(100)
            init_state = cell.zero_state(1, dtype=tf.float32)
            rnn_outputs, _ = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
            return tf.reduce_mean(rnn_outputs, axis=1)
    @staticmethod
    def _dense(output):
        """Final 6-way classification head (logits, no activation specified
        beyond slim's default)."""
        with tf.name_scope('Dense'):
            return slim.fully_connected(output, 6, scope="dense")
|
donfaq/cnn-rnn
|
network/model.py
|
Python
|
mit
| 6,648
|
# MIT License
# Copyright (c) 2017 Tuxedo
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ctypes import *
class _LandMarks(Structure):
    # ctypes mirror of the C landmark struct: two parallel 5-element int
    # arrays -- presumably the 5 facial landmark points; confirm against
    # the SeetaFace C headers.
    _fields_ = [('x',c_int*5),('y',c_int*5)]
class _Face(Structure):
    # Forward declaration: _fields_ must be assigned after the class
    # statement because the struct is self-referential -- the `next`
    # member points to another _Face, forming a C linked list of
    # detections.
    pass
_Face._fields_ = [
    ('left',c_int),
    ('top',c_int),
    ('right',c_int),
    ('bottom',c_int),
    ('score',c_double),
    ('next',POINTER(_Face))]
class Face(object):
    """Plain mutable record for a detected face rectangle.

    Attributes x, y (top-left corner) and w, h (size) all default to 0.
    """
    def __init__(self):
        # Zero-initialize every geometry field.
        self.x = self.y = self.w = self.h = 0
class _Image(Structure):
    # ctypes mirror of the C image struct: raw pixel buffer pointer plus
    # its dimensions.  The element type behind `data` is not visible here
    # -- presumably 8-bit pixels; confirm against the C API.
    _fields_ = [
        ('data', c_void_p),
        ('width',c_int),
        ('height',c_int),
        ('channels',c_int)]
|
TuXiaokang/pyseeta
|
pyseeta/common.py
|
Python
|
mit
| 1,657
|
#!/usr/bin/env python
"""moose_methods.py: Some helper function related with moose to do multiscale
modelling.
Last modified: Thu Jun 05, 2014 01:20AM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import re
import os
import moose
import debug.debug as debug
# Separator used when appending a numeric instance index to a moose path
# (see moosePath / splitComparmentExpr); must be 1 or 2 characters long.
nameSep = '_'
def toFloat(string):
    """Coerce *string* to a float.

    Floats pass through unchanged; str values are delegated to
    stringToFloat().  Any other type raises RuntimeError (note: ints are
    deliberately not accepted, preserving the original contract).
    """
    if isinstance(string, float):
        return string
    elif isinstance(string, str):
        return stringToFloat(string)
    else:
        # BUG FIX: the original formatted type(str) -- i.e. the type of the
        # `str` builtin itself -- instead of the offending argument's type,
        # so every error message claimed a 'str' was being converted.
        raise RuntimeError("Converting type %s to float" % type(string))
def commonPath(pathA, pathB):
    '''Return the longest common leading path of two '/'-separated paths.

    BUG FIX: the original indexed ``b[i]`` while enumerating ``a`` and
    raised IndexError whenever *pathA* had more components than *pathB*
    and *pathB* was a prefix of *pathA*; zipping the component lists
    bounds the loop by the shorter path.
    '''
    common = []
    for seg_a, seg_b in zip(pathA.split('/'), pathB.split('/')):
        if seg_a != seg_b:
            break
        common.append(seg_a)
    return '/'.join(common)
def moosePath(baseName, append):
    """
    Append instance index to basename.

    If *append* is all digits it is attached using the module-level
    ``nameSep`` (1 char: suffix separator; 2 chars: open/close brackets);
    otherwise it is joined as a child path segment.
    TODO: Ideally - should be replace with [ and ]
    """
    if append.isdigit():
        if len(nameSep) == 1:
            return baseName + nameSep + append
        elif len(nameSep) == 2:
            return baseName + nameSep[0] + append + nameSep[1]
        else:
            # BUG FIX: was the Python-2-only `raise E, msg` statement form
            # (a SyntaxError on Python 3); also reworded the original
            # double-negative message.
            raise UserWarning("Separators longer than 2 characters are not supported")
    else:
        return "{}/{}".format(baseName, append)
def splitComparmentExpr(expr):
    """ Breaks compartment expression into name and id.

    Returns:
        (name, id) tuple of strings, split on the module-level ``nameSep``.
    """
    if len(nameSep) == 1:
        p = re.compile(r'(?P<name>[\w\/\d]+)\{0}(?P<id>\d+)'.format(nameSep[0]))
    else:
        # We have already verified that nameSep is no longer than 2 characters.
        a, b = nameSep
        p = re.compile(r'(?P<name>[\w\/\d]+)\{0}(?P<id>\d+)\{1}'.format(a, b))
    m = p.match(expr)
    # IMPROVED: a non-matching expression used to die with an opaque
    # AttributeError on m.group; fail with an explicit message instead.
    assert m is not None, "%r does not look like a compartment expression" % (expr,)
    # IDIOM FIX: dropped the redundant `== True` comparison.
    assert m.group('id').isdigit()
    return m.group('name'), m.group('id')
def getCompartmentId(compExpr):
    """Return just the numeric id portion of a compartment expression."""
    _, comp_id = splitComparmentExpr(compExpr)
    return comp_id
def getCompName(compExpr):
    """Return just the name portion of a compartment expression."""
    comp_name, _ = splitComparmentExpr(compExpr)
    return comp_name
def stringToFloat(text):
    """Convert *text* to float; empty/whitespace-only strings map to 0.0.

    Raises:
        UserWarning: if the stripped text is not a valid float literal.
    """
    text = text.strip()
    if not text:
        return 0.0
    try:
        return float(text)
    except ValueError:
        # BUG FIX: was the Python-2-only `raise E, msg` statement form
        # (a SyntaxError on Python 3); also narrowed the bare
        # `except Exception` to the ValueError float() actually raises.
        raise UserWarning("Failed to convert {0} to float".format(text))
def dumpMoosePaths(pat, isRoot=True):
    ''' Path is pattern '''
    # Format the matching paths as a single indented string.
    return "\n\t{0}".format(getMoosePaths(pat, isRoot))
def getMoosePaths(pat, isRoot=True):
    ''' Return a list of paths for a given pattern.

    *pat* may be a string or any object with a ``.path`` attribute.
    Note: *isRoot* is accepted for interface compatibility but is unused.
    '''
    # IDIOM FIX: use isinstance instead of comparing type() objects.
    if not isinstance(pat, str):
        pat = pat.path
    assert isinstance(pat, str)
    return [obj.path for obj in moose.wildcardFind(pat)]
def dumpMatchingPaths(path, pat='/##'):
    ''' return the name of path which the closely matched with given path
    pattern pat is optional.
    '''
    # Walk down `path` segment by segment, collecting wildcard search
    # results for each progressively longer prefix until a prefix stops
    # matching anything.
    a = path.split('/')
    start = a.pop(0)
    p = moose.wildcardFind(start+'/##')
    common = []
    while len(p) > 0:
        common.append(p)
        # NOTE(review): if every prefix keeps matching, a.pop(0) raises
        # IndexError once `path` runs out of segments -- verify callers
        # always pass paths that eventually stop matching.
        start = start+'/'+a.pop(0)
        p = moose.wildcardFind(start+'/##')
    if len(common) > 1:
        # Report the deepest prefix that still matched something.
        matchedPaths = [x.getPath() for x in common[-1]]
    else:
        matchedPaths = []
    return '\n\t'+('\n\t'.join(matchedPaths))
def dumpFieldName(path, whichInfo='valueF'):
    """Print the field names of *path* for the given info category.

    BUG FIX: was a Python-2-only `print` statement (a SyntaxError on
    Python 3); the call form below works on both 2 and 3.
    """
    print(path.getFieldNames(whichInfo + 'info'))
def writeGraphviz(pat='/##', filename=None, filterList=()):
    '''This is a generic function. It takes the the pattern, search for paths
    and write a graphviz file.

    Args:
        pat: moose wildcard pattern to search.
        filename: output file; when None the digraph is printed to stdout.
        filterList: substrings; any path containing one is skipped.
            (BUG FIX: default changed from a mutable list to a tuple.)
    '''
    def ignore(line):
        # True when the path contains any filter substring.
        for f in filterList:
            if f in line:
                return True
        return False
    pathList = getMoosePaths(pat)
    dot = []
    dot.append("digraph G {")
    dot.append("\tconcentrate=true")
    for p in pathList:
        if ignore(p):
            continue
        # BUG FIX: str.translate(None, '[]()') is the Python-2 signature
        # and raises TypeError on Python 3; a filtering join strips the
        # same characters on both versions.
        p = ''.join(ch for ch in p if ch not in '[]()')
        dot.append('\t' + ' -> '.join(filter(None, p.split('/'))))
    dot.append('}')
    dot = '\n'.join(dot)
    if not filename:
        print(dot)
    else:
        with open(filename, 'w') as graphviz:
            debug.printDebug("INFO"
                    , "Writing topology to file {}".format(filename)
                    )
            graphviz.write(dot)
    return
def setupTable(name, obj, qtyname, tablePath=None, threshold=None):
    '''This is replacement function for moose.utils.setupTable
    It stores qtyname from obj.

    Args:
        name: name of the Table element to create under tablePath.
        obj: moose element whose get<qtyname> field is recorded.
        qtyname: field name; must start with an uppercase letter.
        tablePath: parent path for the table (defaults to obj.path/data).
        threshold: not implemented; passing a value raises UserWarning.
    Returns:
        the created moose.Table, already connected to obj.
    '''
    # BUG FIX: inspect was used below but never imported at module level,
    # so the default-tablePath warning path crashed with a NameError.
    import inspect
    assert qtyname[0].isupper(), "First character must be uppercase character"
    debug.printDebug("DEBUG"
            , "Setting up table for: {} -> get{}".format(obj.path, qtyname)
            )
    if tablePath is None:
        tablePath = '{}/{}'.format(obj.path, 'data')
        debug.printDebug("WARN"
                , "Using default table path: {}".format(tablePath)
                , frame = inspect.currentframe()
                )
    if not moose.exists(obj.path):
        raise RuntimeError("Unknown path {}".format(obj.path))
    moose.Neutral(tablePath)
    table = moose.Table('{}/{}'.format(tablePath, name))
    if threshold is None:
        moose.connect(table, "requestOut", obj, "get{}".format(qtyname))
    else:
        raise UserWarning("TODO: Table with threshold is not implemented yet")
    return table
|
dilawar/moose-full
|
moose-core/python/libmumbl/helper/moose_methods.py
|
Python
|
gpl-2.0
| 5,572
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception as boto_exception
import mock
from neutronclient.common import exceptions as neutron_exceptions
from rally.common import utils
from rally.plugins.openstack.cleanup import resources
from tests.unit import test
BASE = "rally.plugins.openstack.cleanup.resources"
class SynchronizedDeletionTestCase(test.TestCase):
def test_is_deleted(self):
self.assertTrue(resources.SynchronizedDeletion().is_deleted())
class QuotaMixinTestCase(test.TestCase):
def test_id(self):
quota = resources.QuotaMixin()
quota.raw_resource = mock.MagicMock()
self.assertEqual(quota.raw_resource, quota.id())
def test_name(self):
quota = resources.QuotaMixin()
quota.raw_resource = mock.MagicMock()
self.assertIsNone(quota.name())
def test_delete(self):
quota = resources.QuotaMixin()
mock_manager = mock.MagicMock()
quota._manager = lambda: mock_manager
quota.raw_resource = mock.MagicMock()
quota.delete()
mock_manager.delete.assert_called_once_with(quota.raw_resource)
def test_list(self):
quota = resources.QuotaMixin()
quota.tenant_uuid = None
self.assertEqual([], quota.list())
quota.tenant_uuid = mock.MagicMock()
self.assertEqual([quota.tenant_uuid], quota.list())
class NovaServerTestCase(test.TestCase):
def test_list(self):
server = resources.NovaServer()
server._manager = mock.MagicMock()
server.list()
server._manager.return_value.list.assert_called_once_with(limit=-1)
def test_list_old_novaclient(self):
servers = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
mock.MagicMock()]
server = resources.NovaServer()
server._manager = mock.MagicMock()
server._manager.return_value.api = None
server._manager.return_value.list.side_effect = (
servers[:2], servers[2:4], [])
self.assertEqual(servers, server.list())
self.assertEqual(
[mock.call(marker=None), mock.call(marker=servers[1].id),
mock.call(marker=servers[3].id)],
server._manager.return_value.list.call_args_list)
def test_delete(self):
server = resources.NovaServer()
server.raw_resource = mock.Mock()
server._manager = mock.Mock()
server.delete()
server._manager.return_value.delete.assert_called_once_with(
server.raw_resource.id)
def test_delete_locked(self):
server = resources.NovaServer()
server.raw_resource = mock.Mock()
setattr(server.raw_resource, "OS-EXT-STS:locked", True)
server._manager = mock.Mock()
server.delete()
server.raw_resource.unlock.assert_called_once_with()
server._manager.return_value.delete.assert_called_once_with(
server.raw_resource.id)
class NovaFloatingIPsTestCase(test.TestCase):
def test_name(self):
fips = resources.NovaFloatingIPs()
fips.raw_resource = mock.MagicMock()
self.assertIsNone(fips.name())
class NovaSecurityGroupTestCase(test.TestCase):
@mock.patch("%s.base.ResourceManager._manager" % BASE)
def test_list(self, mock_resource_manager__manager):
secgroups = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
secgroups[0].name = "a"
secgroups[1].name = "b"
secgroups[2].name = "default"
mock_resource_manager__manager().list.return_value = secgroups
self.assertSequenceEqual(secgroups[:2],
resources.NovaSecurityGroup().list())
class NovaFloatingIpsBulkTestCase(test.TestCase):
def test_id(self):
ip_range = resources.NovaFloatingIpsBulk()
ip_range.raw_resource = mock.MagicMock()
self.assertEqual(ip_range.raw_resource.address, ip_range.id())
def test_name(self):
fips = resources.NovaFloatingIpsBulk()
fips.raw_resource = mock.MagicMock()
self.assertIsNone(fips.name())
@mock.patch("%s.base.ResourceManager._manager" % BASE)
@mock.patch("rally.common.utils.name_matches_object")
def test_list(self, mock_name_matches_object,
mock_resource_manager__manager):
ip_range = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
ip_range[0].pool = "a"
ip_range[1].pool = "rally_fip_pool_a"
ip_range[2].pool = "rally_fip_pool_b"
mock_name_matches_object.side_effect = (lambda n, o:
n.startswith("rally"))
mock_resource_manager__manager().list.return_value = ip_range
self.assertEqual(ip_range[1:], resources.NovaFloatingIpsBulk().list())
class NovaNetworksTestCase(test.TestCase):
def test_name(self):
network = resources.NovaNetworks()
network.raw_resource = mock.MagicMock()
self.assertEqual(network.raw_resource.label, network.name())
@mock.patch("rally.common.plugin.discover.itersubclasses")
def test_list(self, mock_itersubclasses):
nova_nets = resources.NovaNetworks()
networks = [mock.Mock(label="rally_abcdefgh_12345678"),
mock.Mock(label="rally_12345678_abcdefgh"),
mock.Mock(label="foobar")]
nova_nets._manager = mock.Mock()
nova_nets._manager.return_value.list.return_value = networks
mock_itersubclasses.return_value = iter(
[utils.RandomNameGeneratorMixin])
self.assertEqual(networks[:2], nova_nets.list())
nova_nets._manager.return_value.list.assert_called_once_with()
mock_itersubclasses.assert_called_once_with(
utils.RandomNameGeneratorMixin)
class EC2MixinTestCase(test.TestCase):
def get_ec2_mixin(self):
ec2 = resources.EC2Mixin()
ec2._service = "ec2"
return ec2
def test__manager(self):
ec2 = self.get_ec2_mixin()
ec2.user = mock.MagicMock()
self.assertEqual(ec2.user.ec2.return_value, ec2._manager())
class EC2ServerTestCase(test.TestCase):
@mock.patch("%s.EC2Server._manager" % BASE)
def test_is_deleted(self, mock_ec2_server__manager):
raw_res1 = mock.MagicMock(state="terminated")
raw_res2 = mock.MagicMock(state="terminated")
resource = mock.MagicMock(id="test_id")
manager = resources.EC2Server(resource=resource)
mock_ec2_server__manager().get_only_instances.return_value = [raw_res1]
self.assertTrue(manager.is_deleted())
raw_res1.state = "running"
self.assertFalse(manager.is_deleted())
mock_ec2_server__manager().get_only_instances.return_value = [
raw_res1, raw_res2]
self.assertFalse(manager.is_deleted())
raw_res1.state = "terminated"
self.assertTrue(manager.is_deleted())
mock_ec2_server__manager().get_only_instances.return_value = []
self.assertTrue(manager.is_deleted())
@mock.patch("%s.EC2Server._manager" % BASE)
def test_is_deleted_exceptions(self, mock_ec2_server__manager):
mock_ec2_server__manager.side_effect = [
boto_exception.EC2ResponseError(
status="fake", reason="fake",
body={"Error": {"Code": "fake_code"}}),
boto_exception.EC2ResponseError(
status="fake", reason="fake",
body={"Error": {"Code": "InvalidInstanceID.NotFound"}})
]
manager = resources.EC2Server(resource=mock.MagicMock())
self.assertFalse(manager.is_deleted())
self.assertTrue(manager.is_deleted())
@mock.patch("%s.EC2Server._manager" % BASE)
def test_delete(self, mock_ec2_server__manager):
resource = mock.MagicMock(id="test_id")
manager = resources.EC2Server(resource=resource)
manager.delete()
mock_ec2_server__manager().terminate_instances.assert_called_once_with(
instance_ids=["test_id"])
@mock.patch("%s.EC2Server._manager" % BASE)
def test_list(self, mock_ec2_server__manager):
manager = resources.EC2Server()
mock_ec2_server__manager().get_only_instances.return_value = [
"a", "b", "c"]
self.assertEqual(["a", "b", "c"], manager.list())
class NeutronMixinTestCase(test.TestCase):
    """Unit tests for the generic NeutronMixin cleanup helper."""
    def get_neutron_mixin(self):
        # Helper: a NeutronMixin wired to the "neutron" client service.
        neut = resources.NeutronMixin()
        neut._service = "neutron"
        return neut
    def test_manager(self):
        """_manager() must return the user's neutron client."""
        neut = self.get_neutron_mixin()
        neut.user = mock.MagicMock()
        self.assertEqual(neut.user.neutron.return_value, neut._manager())
    @mock.patch("%s.NeutronMixin._manager" % BASE)
    def test_supports_extension(self, mock__manager):
        """supports_extension() checks aliases from list_extensions()."""
        mock__manager().list_extensions.return_value = {
            "extensions": [{"alias": "foo"}, {"alias": "bar"}]
        }
        neut = self.get_neutron_mixin()
        self.assertTrue(neut.supports_extension("foo"))
        self.assertTrue(neut.supports_extension("bar"))
        self.assertFalse(neut.supports_extension("foobar"))
    def test_id(self):
        """id() reads the "id" key of the raw dict resource."""
        neut = self.get_neutron_mixin()
        neut.raw_resource = {"id": "test"}
        self.assertEqual("test", neut.id())
    def test_name(self):
        """name() reads the "name" key of the raw dict resource."""
        neutron = self.get_neutron_mixin()
        neutron.raw_resource = {"id": "test_id", "name": "test_name"}
        self.assertEqual("test_name", neutron.name())
    def test_delete(self):
        """delete() calls delete_<resource>(<id>) on the neutron client."""
        neut = self.get_neutron_mixin()
        neut.user = mock.MagicMock()
        neut._resource = "some_resource"
        neut.raw_resource = {"id": "42"}
        neut.delete()
        neut.user.neutron().delete_some_resource.assert_called_once_with("42")
    def test_list(self):
        """list() filters list_<resource>s() output down to our tenant."""
        neut = self.get_neutron_mixin()
        neut.user = mock.MagicMock()
        neut._resource = "some_resource"
        neut.tenant_uuid = "user_tenant"
        some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}]
        neut.user.neutron().list_some_resources.return_value = {
            "some_resources": some_resources
        }
        self.assertEqual([some_resources[0]], list(neut.list()))
        neut.user.neutron().list_some_resources.assert_called_once_with(
            tenant_id=neut.tenant_uuid)
class NeutronLbaasV1MixinTestCase(test.TestCase):
    """listing LBaaS v1 resources must be gated on the "lbaas" extension."""
    def get_neutron_lbaasv1_mixin(self, extensions=None):
        # Helper: mixin whose manager advertises the given extension aliases.
        if extensions is None:
            extensions = []
        neut = resources.NeutronLbaasV1Mixin()
        neut._service = "neutron"
        neut._resource = "some_resource"
        neut._manager = mock.Mock()
        neut._manager().list_extensions.return_value = {
            "extensions": [{"alias": ext} for ext in extensions]
        }
        return neut
    def test_list_lbaas_available(self):
        """With "lbaas" present, list() filters resources by tenant."""
        neut = self.get_neutron_lbaasv1_mixin(extensions=["lbaas"])
        neut.tenant_uuid = "user_tenant"
        some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}]
        neut._manager().list_some_resources.return_value = {
            "some_resources": some_resources
        }
        self.assertEqual([some_resources[0]], list(neut.list()))
        neut._manager().list_some_resources.assert_called_once_with(
            tenant_id=neut.tenant_uuid)
    def test_list_lbaas_unavailable(self):
        """Without "lbaas", list() is empty and never hits the API."""
        neut = self.get_neutron_lbaasv1_mixin()
        self.assertEqual([], list(neut.list()))
        self.assertFalse(neut._manager().list_some_resources.called)
class NeutronPortTestCase(test.TestCase):
    """Port deletion is routed by the port's device_owner value."""
    def test_delete(self):
        """A plain port is removed via delete_port()."""
        raw_res = {"device_owner": "abbabaab", "id": "some_id"}
        user = mock.MagicMock()
        resources.NeutronPort(resource=raw_res, user=user).delete()
        user.neutron().delete_port.assert_called_once_with(raw_res["id"])
    def test_delete_port_raise_exception(self):
        """An already-missing port (PortNotFoundClient) is not an error."""
        raw_res = {"device_owner": "abbabaab", "id": "some_id"}
        user = mock.MagicMock()
        user.neutron().delete_port.side_effect = (
            neutron_exceptions.PortNotFoundClient)
        resources.NeutronPort(resource=raw_res, user=user).delete()
        user.neutron().delete_port.assert_called_once_with(raw_res["id"])
    def test_delete_port_device_owner(self):
        """A router-interface port is detached from its router instead."""
        raw_res = {
            "device_owner": "network:router_interface",
            "id": "some_id",
            "device_id": "dev_id"
        }
        user = mock.MagicMock()
        resources.NeutronPort(resource=raw_res, user=user).delete()
        user.neutron().remove_interface_router.assert_called_once_with(
            raw_res["device_id"], {"port_id": raw_res["id"]})
class NeutronQuotaTestCase(test.TestCase):
    """NeutronQuota deletes per-tenant quotas through the admin client."""
    @mock.patch("%s.NeutronQuota._manager" % BASE)
    def test_delete(self, mock_neutron_quota__manager):
        """delete() resets the quota for the wrapped tenant uuid."""
        user = mock.MagicMock()
        resources.NeutronQuota(user=user, tenant_uuid="fake").delete()
        mock_neutron_quota__manager().delete_quota.assert_called_once_with(
            "fake")
    def test__manager(self):
        """_manager() resolves the client from the admin credential."""
        admin = mock.MagicMock(neutron=mock.Mock(return_value="foo"))
        res = resources.NeutronQuota(admin=admin, tenant_uuid="fake")
        res._manager()
        self.assertEqual("foo", getattr(admin, res._service)())
class GlanceImageTestCase(test.TestCase):
    """GlanceImage listing is scoped to the owning tenant."""
    @mock.patch("%s.GlanceImage._manager" % BASE)
    def test_list(self, mock_glance_image__manager):
        """list() passes the tenant uuid as the image owner filter."""
        glance = resources.GlanceImage()
        glance.tenant_uuid = mock.MagicMock()
        mock_glance_image__manager().list.return_value = ["a", "b", "c"]
        self.assertEqual(["a", "b", "c"], glance.list())
        mock_glance_image__manager().list.assert_called_once_with(
            owner=glance.tenant_uuid)
class CeilometerTestCase(test.TestCase):
    """CeilometerAlarms identify and list alarms per project."""
    def test_id(self):
        """id() exposes the alarm_id attribute of the raw resource."""
        ceil = resources.CeilometerAlarms()
        ceil.raw_resource = mock.MagicMock()
        self.assertEqual(ceil.raw_resource.alarm_id, ceil.id())
    @mock.patch("%s.CeilometerAlarms._manager" % BASE)
    def test_list(self, mock_ceilometer_alarms__manager):
        """list() queries alarms filtered on project_id == tenant uuid."""
        ceil = resources.CeilometerAlarms()
        ceil.tenant_uuid = mock.MagicMock()
        mock_ceilometer_alarms__manager().list.return_value = ["a", "b", "c"]
        # reset_mock clears the call made while configuring return_value
        # above, so assert_called_once_with only counts list()'s own call.
        mock_ceilometer_alarms__manager.reset_mock()
        self.assertEqual(["a", "b", "c"], ceil.list())
        mock_ceilometer_alarms__manager().list.assert_called_once_with(
            q=[{"field": "project_id", "op": "eq", "value": ceil.tenant_uuid}])
class ZaqarQueuesTestCase(test.TestCase):
    def test_list(self):
        """list() must enumerate the user's Zaqar queues exactly once."""
        fake_user = mock.Mock()
        queues_resource = resources.ZaqarQueues(user=fake_user)
        queues_resource.list()
        fake_user.zaqar().queues.assert_called_once_with()
class KeystoneMixinTestCase(test.TestCase):
    """KeystoneMixin wraps the admin keystone client via keystone_wrapper."""
    def test_is_deleted(self):
        """Keystone resources report deleted unconditionally."""
        self.assertTrue(resources.KeystoneMixin().is_deleted())
    def get_keystone_mixin(self):
        # Helper: a KeystoneMixin wired to the "keystone" client service.
        kmixin = resources.KeystoneMixin()
        kmixin._service = "keystone"
        return kmixin
    @mock.patch("%s.keystone_wrapper.wrap" % BASE)
    def test_manager(self, mock_wrap):
        """_manager() wraps the admin keystone client."""
        keystone_mixin = self.get_keystone_mixin()
        keystone_mixin.admin = mock.MagicMock()
        self.assertEqual(mock_wrap.return_value, keystone_mixin._manager())
        mock_wrap.assert_called_once_with(
            keystone_mixin.admin.keystone.return_value)
    @mock.patch("%s.keystone_wrapper.wrap" % BASE)
    def test_delete(self, mock_wrap):
        """delete() calls delete_<resource>(<id>) on the wrapped client."""
        keystone_mixin = self.get_keystone_mixin()
        keystone_mixin._resource = "some_resource"
        keystone_mixin.id = lambda: "id_a"
        keystone_mixin.admin = mock.MagicMock()
        keystone_mixin.delete()
        mock_wrap.assert_called_once_with(
            keystone_mixin.admin.keystone.return_value)
        mock_wrap().delete_some_resource.assert_called_once_with("id_a")
    @mock.patch(
        "rally.plugins.openstack.scenarios.keystone.utils.is_temporary")
    @mock.patch("%s.keystone_wrapper.wrap" % BASE)
    def test_list(self, mock_wrap, mock_is_temporary):
        """list() keeps only the resources flagged as temporary."""
        keystone_mixin = self.get_keystone_mixin()
        keystone_mixin._resource = "some_resource2"
        keystone_mixin.admin = mock.MagicMock()
        result = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
        # First two resources are temporary (kept), last one is not.
        mock_is_temporary.side_effect = [True, True, False]
        mock_wrap().list_some_resource2s.return_value = result
        self.assertSequenceEqual(result[:2], keystone_mixin.list())
        mock_wrap().list_some_resource2s.assert_called_once_with()
        mock_is_temporary.assert_has_calls([mock.call(r) for r in result])
class SwiftMixinTestCase(test.TestCase):
    """SwiftMixin treats the raw resource itself as id/name container."""
    def get_swift_mixin(self):
        # Helper: a SwiftMixin wired to the "swift" client service.
        swift_mixin = resources.SwiftMixin()
        swift_mixin._service = "swift"
        return swift_mixin
    def test_manager(self):
        """_manager() must return the user's swift client."""
        swift_mixin = self.get_swift_mixin()
        swift_mixin.user = mock.MagicMock()
        self.assertEqual(swift_mixin.user.swift.return_value,
                         swift_mixin._manager())
    def test_id(self):
        """id() returns the whole raw resource object."""
        swift_mixin = self.get_swift_mixin()
        swift_mixin.raw_resource = mock.MagicMock()
        self.assertEqual(swift_mixin.raw_resource, swift_mixin.id())
    def test_name(self):
        # name() is expected to pick the last element of the raw resource
        # (the object name of a [container, object] pair).
        swift = self.get_swift_mixin()
        swift.raw_resource = ["name1", "name2"]
        self.assertEqual("name2", swift.name())
    def test_delete(self):
        """delete() unpacks the raw resource as delete_<resource> args."""
        swift_mixin = self.get_swift_mixin()
        swift_mixin.user = mock.MagicMock()
        swift_mixin._resource = "some_resource"
        swift_mixin.raw_resource = mock.MagicMock()
        swift_mixin.delete()
        swift_mixin.user.swift().delete_some_resource.assert_called_once_with(
            *swift_mixin.raw_resource)
class SwiftObjectTestCase(test.TestCase):
    """SwiftObject listing enumerates every object of every container."""
    @mock.patch("%s.SwiftMixin._manager" % BASE)
    def test_list(self, mock_swift_mixin__manager):
        """Objects listed = containers x objects-per-container."""
        containers = [mock.MagicMock(), mock.MagicMock()]
        objects = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
        # get_account()/get_container() return (headers, listing) tuples.
        mock_swift_mixin__manager().get_account.return_value = (
            "header", containers)
        mock_swift_mixin__manager().get_container.return_value = (
            "header", objects)
        self.assertEqual(len(containers),
                         len(resources.SwiftContainer().list()))
        self.assertEqual(len(containers) * len(objects),
                         len(resources.SwiftObject().list()))
class SwiftContainerTestCase(test.TestCase):
    @mock.patch("%s.SwiftMixin._manager" % BASE)
    def test_list(self, mock_swift_mixin__manager):
        """Each container reported by get_account() becomes one list() item."""
        fake_containers = [mock.MagicMock() for _ in range(3)]
        # get_account() returns a (headers, containers) tuple.
        mock_swift_mixin__manager().get_account.return_value = (
            "header", fake_containers)
        listed = resources.SwiftContainer().list()
        self.assertEqual(len(fake_containers), len(listed))
class ManilaShareTestCase(test.TestCase):
    """ManilaShare targets the "shares" resource of the manila client."""
    def test_list(self):
        """list() sets the resource name and delegates to the manager."""
        share_resource = resources.ManilaShare()
        share_resource._manager = mock.MagicMock()
        share_resource.list()
        self.assertEqual("shares", share_resource._resource)
        share_resource._manager.return_value.list.assert_called_once_with()
    def test_delete(self):
        """delete() passes the share id through to the manager."""
        share_resource = resources.ManilaShare()
        share_resource._manager = mock.MagicMock()
        share_resource.id = lambda: "fake_id"
        share_resource.delete()
        self.assertEqual("shares", share_resource._resource)
        share_resource._manager.return_value.delete.assert_called_once_with(
            "fake_id")
class ManilaShareNetworkTestCase(test.TestCase):
    def test_list(self):
        """list() targets "share_networks" and delegates to the manager."""
        share_net = resources.ManilaShareNetwork()
        share_net._manager = mock.MagicMock()
        share_net.list()
        self.assertEqual("share_networks", share_net._resource)
        share_net._manager.return_value.list.assert_called_once_with()
    def test_delete(self):
        """delete() passes the share-network id through to the manager."""
        share_net = resources.ManilaShareNetwork()
        share_net._manager = mock.MagicMock()
        share_net.id = lambda: "fake_id"
        share_net.delete()
        self.assertEqual("share_networks", share_net._resource)
        share_net._manager.return_value.delete.assert_called_once_with(
            "fake_id")
class ManilaSecurityServiceTestCase(test.TestCase):
    """ManilaSecurityService targets the "security_services" resource."""
    def test_list(self):
        """list() sets the resource name and delegates to the manager."""
        ss_resource = resources.ManilaSecurityService()
        ss_resource._manager = mock.MagicMock()
        ss_resource.list()
        self.assertEqual("security_services", ss_resource._resource)
        ss_resource._manager.return_value.list.assert_called_once_with()
    def test_delete(self):
        """delete() passes the security-service id through to the manager."""
        ss_resource = resources.ManilaSecurityService()
        ss_resource._manager = mock.MagicMock()
        ss_resource.id = lambda: "fake_id"
        ss_resource.delete()
        self.assertEqual("security_services", ss_resource._resource)
        ss_resource._manager.return_value.delete.assert_called_once_with(
            "fake_id")
class FuelEnvironmentTestCase(test.TestCase):
    """FuelEnvironment wraps dict-based Fuel environments."""
    def test_id(self):
        """id() reads the "id" key of the raw dict resource."""
        fres = resources.FuelEnvironment()
        fres.raw_resource = {"id": 42, "name": "chavez"}
        self.assertEqual(42, fres.id())
    def test_name(self):
        """name() reads the "name" key of the raw dict resource."""
        fuel = resources.FuelEnvironment()
        fuel.raw_resource = {"id": "test_id", "name": "test_name"}
        self.assertEqual("test_name", fuel.name())
    @mock.patch("%s.FuelEnvironment._manager" % BASE)
    def test_is_deleted(self, mock__manager):
        """is_deleted() is True exactly when get(<id>) returns nothing."""
        mock__manager.return_value.get.return_value = None
        fres = resources.FuelEnvironment()
        fres.id = mock.Mock()
        self.assertTrue(fres.is_deleted())
        mock__manager.return_value.get.return_value = "env"
        self.assertFalse(fres.is_deleted())
        mock__manager.return_value.get.assert_called_with(fres.id.return_value)
    @mock.patch("%s.FuelEnvironment._manager" % BASE)
    @mock.patch("rally.common.utils.name_matches_object")
    def test_list(self, mock_name_matches_object, mock__manager):
        """list() keeps only environments whose name matches the pattern."""
        envs = [{"name": "rally_one"}, {"name": "rally_two"},
                {"name": "three"}]
        mock__manager.return_value.list.return_value = envs
        # Pretend only names starting with "rally_" belong to us.
        mock_name_matches_object.side_effect = (
            lambda n, o: n.startswith("rally_"))
        fres = resources.FuelEnvironment()
        self.assertEqual(envs[:-1], fres.list())
|
amit0701/rally
|
tests/unit/plugins/openstack/cleanup/test_resources.py
|
Python
|
apache-2.0
| 23,018
|
from .. channel_order import ChannelOrder
from . base import SPIBase
from ... colors import gamma
SPI_SPEED_ERROR = 'WS2801 requires an SPI speed of 1MHz but was set to {}MHz'
class WS2801(SPIBase):
    """Driver for WS2801-based LED strips (Raspberry Pi, BeagleBone, ...).

    Accepts the same parameters as
    :py:class:`bibliopixel.drivers.SPI.SPIBase`; the only extra constraint
    is that the SPI bus speed must not exceed 1 MHz.
    """
    def __init__(self, num, gamma=gamma.WS2801, spi_speed=1, **kwargs):
        # Reject speeds outside (0, 1] MHz -- the WS2801 caps out at 1 MHz.
        if spi_speed <= 0 or spi_speed > 1:
            raise ValueError(SPI_SPEED_ERROR.format(spi_speed))
        super().__init__(num, gamma=gamma, spi_speed=spi_speed, **kwargs)
|
rec/BiblioPixel
|
bibliopixel/drivers/SPI/WS2801.py
|
Python
|
mit
| 643
|
#!/usr/bin/env python
import os
# Start coverage collection as early as possible (before the app package is
# imported) so that module-level code in app/ is measured too.
COV = None
if os.environ.get('FLASK_COVERAGE'):
    import coverage
    COV = coverage.coverage(branch=True, include='app/*')
    COV.start()
from app import create_app, db
from app.models import User, Follow, Role, Permission, Post, Comment
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
# Build the Flask app from the FLASK_CONFIG environment variable (falling
# back to the 'default' configuration) and wire up CLI and migrations.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Expose the app, db handle and model classes inside `manage.py shell`."""
    context = dict(app=app, db=db)
    context.update(User=User, Follow=Follow, Role=Role,
                   Permission=Permission, Post=Post, Comment=Comment)
    return context
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False):
    """Run the unit tests.

    With ``--coverage`` the process re-executes itself with FLASK_COVERAGE
    set, so that coverage collection (started at the top of this module)
    begins before the app package is imported.
    """
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        # Replace the current process; execution restarts from the top.
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        # Also emit a browsable HTML report under tmp/coverage.
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler.

    :param length: how many of the slowest functions to show per request.
    :param profile_dir: if set, write per-request profile data files there
        instead of printing to stdout.
    """
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
                                      profile_dir=profile_dir)
    app.run()
@manager.command
def deploy():
    """Run deployment tasks (safe to re-run; all steps are idempotent here)."""
    from flask_migrate import upgrade
    from app.models import Role, User
    # migrate database to latest revision
    upgrade()
    # create user roles
    Role.insert_roles()
    # create self-follows for all users
    User.add_self_follows()
if __name__ == '__main__':
    manager.run()
# NOTE(review): the two calls below sit at module level *after*
# manager.run(). When the script is executed directly, Flask-Script's
# run() does not return normally, so they never execute; when the module
# is imported, they would run at import time instead. Presumably they
# were meant to be part of a command -- confirm and relocate.
User.generate_fake(50)
Post.generate_fake(100)
|
itachigiotto/flasky
|
manage.py
|
Python
|
mit
| 2,224
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
from tempfile import NamedTemporaryFile, mktemp
from subprocess import check_output, call
from utils import uopen
import os
import re
import codecs
def tercom(hypotheses, references):
    """
    Computes the TER between hypotheses and references.
    :param hypotheses: [str]
    :param references: [str]
    :return: float
    """
    # tercom expects one segment per line suffixed with "(id)" so that
    # hypothesis and reference lines can be matched up.
    with NamedTemporaryFile('w') as hypothesis_file, NamedTemporaryFile('w') as reference_file:
        for i, (hypothesis, reference) in enumerate(zip(hypotheses, references)):
            hypothesis_file.write('{} ({})\n'.format(hypothesis, i))
            reference_file.write('{} ({})\n'.format(reference, i))
        # Flush so the java subprocess sees the data; the files stay open
        # (and therefore on disk) for the duration of the call.
        hypothesis_file.flush()
        reference_file.flush()
        # NOTE(review): 'tercom.jar' is resolved relative to the current
        # working directory -- confirm callers run from the project root.
        cmd = ['java', '-jar', 'tercom.jar', '-h', hypothesis_file.name, '-r', reference_file.name]
        output = check_output(cmd)
        error = re.findall(r'Total TER: (.*?) ', output, re.MULTILINE)[0]
    return float(error)
def tercom_unicode(hypotheses, references):
    """
    Computes the TER between hypotheses and references.
    :param hypotheses: [unicode]
    :param references: [unicode]
    :return: float
    """
    # Wrap the temp files so unicode text is encoded as UTF-8 on write
    # (Python 2: NamedTemporaryFile('w') is a byte stream).
    writer = codecs.getwriter('utf-8')
    with NamedTemporaryFile('w') as hypothesis_file, NamedTemporaryFile('w') as reference_file:
        hypothesis_file = writer(hypothesis_file)
        reference_file = writer(reference_file)
        for i, (hypothesis, reference) in enumerate(zip(hypotheses, references)):
            hypothesis_file.write(u'{} ({})\n'.format(hypothesis, i))
            reference_file.write(u'{} ({})\n'.format(reference, i))
        # Flush so the java subprocess sees the data before it runs.
        hypothesis_file.flush()
        reference_file.flush()
        cmd = ['java', '-jar', 'tercom.jar', '-h', hypothesis_file.name, '-r', reference_file.name]
        output = check_output(cmd)
        error = re.findall(r'Total TER: (.*?) ', output, re.MULTILINE)[0]
    return float(error)
def tercom_scores(hypotheses, references):
    """
    Returns a list of TERCOM scores

    One float per (hypothesis, reference) pair, parsed from the '.ter'
    report file that tercom writes via its '-o ter -n <prefix>' options.
    """
    with NamedTemporaryFile('w') as hypothesis_file, NamedTemporaryFile('w') as reference_file:
        for i, (hypothesis, reference) in enumerate(zip(hypotheses, references)):
            hypothesis_file.write('{} ({})\n'.format(hypothesis, i))
            reference_file.write('{} ({})\n'.format(reference, i))
        hypothesis_file.flush()
        reference_file.flush()
        # NOTE(review): mktemp() only reserves a name, not the file --
        # racy in principle, but tercom needs a plain output prefix.
        filename = mktemp()
        cmd = ['java', '-jar', 'tercom.jar', '-h', hypothesis_file.name, '-r', reference_file.name,
               '-o', 'ter', '-n', filename]
        # Silence tercom's console chatter.
        output = open('/dev/null', 'w')
        call(cmd, stdout=output, stderr=output)
        # First two lines of the report are headers; the score is the last
        # whitespace-separated field of each remaining line.
        with open(filename + '.ter') as f:
            lines = list(f)
        scores = [float(line.split(' ')[-1]) for line in lines[2:]]
        os.remove(filename + '.ter')
    return scores
def tercom_scores_unicode(hypotheses, references):
    """
    Returns a list of TERCOM scores

    Unicode variant: input segments are encoded as UTF-8 before being
    handed to tercom; the '.ter' report is read back with uopen().
    """
    writer = codecs.getwriter('utf-8')
    with NamedTemporaryFile('w') as hypothesis_file, NamedTemporaryFile('w') as reference_file:
        hypothesis_file = writer(hypothesis_file)
        reference_file = writer(reference_file)
        for i, (hypothesis, reference) in enumerate(zip(hypotheses, references)):
            hypothesis_file.write(u'{} ({})\n'.format(hypothesis, i))
            reference_file.write(u'{} ({})\n'.format(reference, i))
        hypothesis_file.flush()
        reference_file.flush()
        # NOTE(review): mktemp() only reserves a name, not the file --
        # racy in principle, but tercom needs a plain output prefix.
        filename = mktemp()
        cmd = ['java', '-jar', 'tercom.jar', '-h', hypothesis_file.name, '-r', reference_file.name,
               '-o', 'ter', '-n', filename]
        output = uopen('/dev/null', 'w')
        call(cmd, stdout=output, stderr=output)
        # Skip the two header lines; the score is the last field per line.
        with uopen(filename + '.ter') as f:
            lines = list(f)
        scores = [float(line.split(' ')[-1]) for line in lines[2:]]
        os.remove(filename + '.ter')
    return scores
|
eske/RLPE
|
tercom.py
|
Python
|
apache-2.0
| 3,977
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
"""
< O que é o arquivo >
author: Cassiano Kunsch das Neves
last edited: <10/12/2015>
"""
class GravaArquivo(object):
@staticmethod
def startGravacao(diretorioArqDestino, lstDados, controlador):
BufferString = ''
qtnCasaDecimais = 8
arquivo = open(diretorioArqDestino, "w")
del(lstDados[0])
for lstLinhas in lstDados:
for conteudoColuna in lstLinhas:
qtnZeros = qtnCasaDecimais - len(conteudoColuna)
conteudoColunaMenor = str(conteudoColuna) + (("0")*qtnZeros)
conteudoColunaMenor = conteudoColunaMenor[:6]
BufferString += conteudoColunaMenor + ("\t")
controlador[2]+=1
BufferString += "\n"
arquivo.write(BufferString)
BufferString = ''
arquivo.close()
|
LEDS/Lab303
|
Solução em Desenvolvimento/Conversor/Model/Gravador.py
|
Python
|
gpl-3.0
| 887
|
from nose.tools import eq_
from receipts.receipts import Receipt
from amo.tests import TestCase
from mkt.receipts.tests.test_verify import sample
from mkt.receipts.utils import reissue_receipt
class TestReissue(TestCase):
    """Reissuing a receipt must refresh time claims and keep the rest."""

    def test_expired(self):
        """exp/iat/nbf move forward; identity claims are copied through."""
        old = Receipt(sample).receipt_decoded()
        new = Receipt(reissue_receipt(sample)).receipt_decoded()
        # Time-based claims must strictly increase on reissue.
        for greater in ['exp', 'iat', 'nbf']:
            assert new[greater] > old[greater], (
                '{0} for new: {1} should be greater than old: {2}'.format(
                    greater, new[greater], old[greater]))
        # Non-time claims must survive reissue unchanged.
        # Bug fix: the failure message used to format `greater` (leftover
        # from the loop above) instead of `same`, naming the wrong claim.
        for same in ['product', 'detail', 'iss', 'reissue', 'typ', 'user',
                     'verify']:
            eq_(new[same], old[same], (
                '{0} for new: {1} should be the same as old: {2}'.format(
                    same, new[same], old[same])))
|
jinankjain/zamboni
|
mkt/receipts/tests/test_utils_.py
|
Python
|
bsd-3-clause
| 871
|
from typing import Dict
from CreatureRogue.data_layer.growth_rate import GrowthRate
from CreatureRogue.data_layer.species import Species
class XpLookup:
    """Static lookup from (growth rate, xp) to creature level and back."""

    def __init__(self, xp_map: Dict[GrowthRate, Dict[int, int]]):
        # Table mapping growth rate -> {level: minimum xp for that level}.
        self.xp_map = xp_map

    def level_at_xp(self, species: Species, xp: int) -> int:
        """
        The level that a creature is when it has exactly xp amount of
        experience is determined by the species growth rate and some static
        data which is checked here.
        """
        thresholds = self.xp_map[species.growth_rate]
        return next(
            (lvl for lvl, required in thresholds.items() if xp < required), 0)

    def xp_at_level(self, species: Species, level: int) -> int:
        """
        This is the minimum XP required to achieve a certain level for the
        given species.
        """
        growth_table = self.xp_map[species.growth_rate]
        return growth_table[level]
|
DaveTCode/CreatureRogue
|
CreatureRogue/data_layer/xp_lookup.py
|
Python
|
mit
| 961
|
### Taken from https://pypi.python.org/pypi/paho-mqtt
### Requires Paho-MQTT package, install by:
### pip install paho-mqtt
import paho.mqtt.client as mqtt
# Change accordingly to the MQTT Broker and topic you want to subscribe
# In the example it would be either "test.mosquitto.org" or "fd00::1" if
# running a mosquitto broker locally
MQTT_URL = "fd00::1"
MQTT_TOPIC_EVENT = "zolertia/evt/status"
MQTT_TOPIC_CMD = "zolertia/cmd/leds"
def on_connect(client, userdata, flags, rc):
    """Paho callback: fired when the broker acknowledges the connection."""
    print("Connected with result code " + str(rc))
    # Subscribing inside on_connect means subscriptions are re-established
    # after every (re)connect.
    client.subscribe(MQTT_TOPIC_EVENT)
    print("Subscribed to " + MQTT_TOPIC_EVENT)
    client.subscribe(MQTT_TOPIC_CMD)
    print("Subscribed to " + MQTT_TOPIC_CMD)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    print(msg.topic + " " + str(msg.payload))
# Wire up the client and block forever processing network traffic.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
print("connecting to " + MQTT_URL)
# Port 1883 (plain MQTT), 60 second keepalive.
client.connect(MQTT_URL, 1883, 60)
client.loop_forever()
|
miarcompanies/sdn-wise-contiki
|
contiki/examples/zolertia/tutorial/04-mqtt/mqtt-client.py
|
Python
|
bsd-3-clause
| 1,050
|
import six
from pyquery import PyQuery as pq
from olympia.amo.tests import BaseTestCase
from olympia.bandwagon.models import Collection
from olympia.bandwagon.templatetags.jinja_helpers import user_collection_list
class TestHelpers(BaseTestCase):
    """Tests for the user_collection_list template helper."""
    def test_user_collection_list(self):
        """The helper renders a heading plus a link per collection, and
        nothing at all for an empty collection list."""
        c1 = Collection(uuid='eb4e3cd8-5cf1-4832-86fb-a90fc6d3765c')
        c2 = Collection(uuid='61780943-e159-4206-8acd-0ae9f63f294c',
                        nickname='my_collection')
        heading = 'My Heading'
        response = six.text_type(user_collection_list([c1, c2], heading))
        # heading
        assert pq(response)('h3').text() == heading
        # both items
        # TODO reverse URLs
        assert c1.get_url_path() in response, 'Collection UUID link missing.'
        assert c2.get_url_path() in response, (
            'Collection nickname link missing.')
        # empty collection, empty response
        response = six.text_type(user_collection_list([], heading))
        assert not response, 'empty collection should not create a list'
|
aviarypl/mozilla-l10n-addons-server
|
src/olympia/bandwagon/tests/test_helpers.py
|
Python
|
bsd-3-clause
| 1,066
|
#!/usr/bin/env python
#
# $Id: iotop.py 1236 2011-12-13 19:00:35Z g.rodola $
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of iotop (http://guichaz.free.fr/iotop/) showing real time
disk I/O statistics.
It works on Linux only (FreeBSD and OSX are missing support for IO
counters).
It doesn't work on Windows as curses module is required.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
"""
import os
import sys
import psutil
# Bail out early on unsupported platforms: the legacy psutil API used below
# (Process.get_io_counters) and curses are only usable on POSIX here.
if not hasattr(psutil.Process, 'get_io_counters') or os.name != 'posix':
    sys.exit('platform not supported')
import time
import curses
import atexit
# --- curses stuff
def tear_down():
    """Restore the terminal to its normal state on exit (undo initscr())."""
    win.keypad(0)
    curses.nocbreak()
    curses.echo()
    curses.endwin()
win = curses.initscr()
# Make sure the terminal is restored however the process exits.
atexit.register(tear_down)
# NOTE(review): endwin() immediately after initscr() leaves curses mode
# until the first refresh; presumably so early errors print normally --
# confirm before changing.
curses.endwin()
# Row the next print_line() call writes to; reset on every redraw.
lineno = 0
def print_line(line, highlight=False):
    """A thin wrapper around curses's addstr().

    Writes *line* on the current global row; with ``highlight`` the line
    is padded to full width and drawn in reverse video (header bar).
    """
    global lineno
    try:
        if highlight:
            # Pad so the reverse-video bar spans the whole screen width.
            line += " " * (win.getmaxyx()[1] - len(line))
            win.addstr(lineno, 0, line, curses.A_REVERSE)
        else:
            win.addstr(lineno, 0, line, 0)
    except curses.error:
        # Writing past the bottom of the window raises curses.error:
        # rewind to the top, flush what we have, and let the caller stop.
        lineno = 0
        win.refresh()
        raise
    else:
        lineno += 1
# --- /curses stuff
def bytes2human(n):
    """Convert a bytes-per-second count into a human readable rate string.

    Uses binary prefixes (K = 2**10, M = 2**20, ...).  The doctest values
    below were stale (left over from a one-decimal format) and have been
    corrected to match the '%.2f' formatting actually used.

    >>> bytes2human(10000)
    '9.77 K/s'
    >>> bytes2human(100001221)
    '95.37 M/s'
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        prefix[s] = 1 << (i+1)*10
    # Pick the largest prefix whose threshold does not exceed n.
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.2f %s/s' % (value, s)
    return '%.2f B/s' % (n)
def poll(interval):
    """Calculate IO usage by comparing IO statics before and
    after the interval.
    Return a tuple including all currently running processes
    sorted by IO activity and total disks I/O activity.
    """
    # first get a list of all processes and disk io counters
    procs = [p for p in psutil.process_iter()]
    # Iterate over a copy (procs[:]) because entries are removed from
    # the original list while iterating.
    for p in procs[:]:
        try:
            p._before = p.get_io_counters()
        except psutil.Error:
            procs.remove(p)
            continue
    disks_before = psutil.disk_io_counters()
    # sleep some time
    time.sleep(interval)
    # then retrieve the same info again
    for p in procs[:]:
        try:
            # NOTE(review): legacy psutil API -- cmdline/name/username are
            # used as attributes here; modern psutil makes them methods.
            p._after = p.get_io_counters()
            p._cmdline = ' '.join(p.cmdline)
            if not p._cmdline:
                p._cmdline = p.name
            p._username = p.username
        except psutil.NoSuchProcess:
            procs.remove(p)
    disks_after = psutil.disk_io_counters()
    # finally calculate results by comparing data before and
    # after the interval
    for p in procs:
        p._read_per_sec = p._after.read_bytes - p._before.read_bytes
        p._write_per_sec = p._after.write_bytes - p._before.write_bytes
        p._total = p._read_per_sec + p._write_per_sec
    disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
    disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes
    # sort processes by total disk IO so that the more intensive
    # ones get listed first
    processes = sorted(procs, key=lambda p: p._total, reverse=True)
    return (processes, disks_read_per_sec, disks_write_per_sec)
def refresh_window(procs, disks_read, disks_write):
    """Print results on screen by using curses.

    Draws the totals line, a highlighted header, then one row per process
    until the window runs out of rows.
    """
    curses.endwin()
    templ = "%-5s %-7s %11s %11s %s"
    win.erase()
    disks_tot = "Total DISK READ: %s | Total DISK WRITE: %s" \
                % (bytes2human(disks_read), bytes2human(disks_write))
    print_line(disks_tot)
    header = templ % ("PID", "USER", "DISK READ", "DISK WRITE", "COMMAND")
    print_line(header, highlight=True)
    for p in procs:
        line = templ % (p.pid,
                        p._username[:7],
                        bytes2human(p._read_per_sec),
                        bytes2human(p._write_per_sec),
                        p._cmdline)
        try:
            print_line(line)
        except curses.error:
            # print_line raises once we hit the bottom of the window;
            # stop drawing further rows.
            break
    win.refresh()
def main():
    """Poll disk I/O stats and redraw the screen until interrupted."""
    try:
        # First poll uses a zero interval so the UI appears immediately;
        # subsequent polls wait one second between samples.
        delay = 0
        while True:
            procs, disks_read, disks_write = poll(delay)
            refresh_window(procs, disks_read, disks_write)
            delay = 1
    except (KeyboardInterrupt, SystemExit):
        pass
if __name__ == '__main__':
    main()
|
wilebeast/FireFox-OS
|
B2G/gecko/python/psutil/examples/iotop.py
|
Python
|
apache-2.0
| 4,495
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import json
from datetime import datetime
def main() -> None:
    """Entry point: dispatch on the Demisto command being invoked."""
    integrationInstance = demisto.integrationInstance()
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        if demisto.command() == 'test-module':
            # Connectivity test: nothing external to validate, always ok.
            return_results("ok")
        elif demisto.command() == 'fetch-incidents':
            # The sample incident body comes verbatim from the instance's
            # "JSON" parameter.
            data = json.loads(demisto.params().get('JSON'))
            incident_name = demisto.params().get("name")
            if not incident_name:
                incident_name = f"Sample Incident - {integrationInstance}"
            incidents = []
            incident = {
                'name': incident_name,
                'details': json.dumps(data),
                # NOTE(review): isoformat() never contains "Z", so the
                # split is a no-op and a naive local timestamp gets a "Z"
                # (UTC) suffix -- confirm whether UTC was intended.
                'occurred': datetime.now().isoformat().split("Z", 1)[0] + "Z",
                'rawJSON': json.dumps(data)
            }
            incidents.append(incident)
            demisto.incidents(incidents)
        elif demisto.command() == 'json-sample-incident-generator-command':
            # Optionally override keys in the sample JSON; "k1,k2" with
            # "v1,v2" sets several keys pairwise.
            key = demisto.args().get("key", None)
            value = demisto.args().get("value", None)
            data = json.loads(demisto.params()["JSON"])
            if key and value:
                if "," in key:
                    keys = key.split(",")
                    values = value.split(",")
                    for index, tmp_key in enumerate(keys):
                        data[tmp_key] = values[index]
                else:
                    data[key] = value
            command_results = CommandResults(
                outputs_prefix='JSON.Sample',
                outputs=data
            )
            return_results(command_results)
    # Log exceptions and return errors
    except Exception as e:
        # NOTE(review): `traceback` is not imported in this file;
        # presumably it arrives via CommonServerPython's star import --
        # confirm.
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
demisto/content
|
Packs/JSONSampleIncidentGenerator/Integrations/JSONSampleIncidentGenerator/JSONSampleIncidentGenerator.py
|
Python
|
mit
| 2,058
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pure Python implementation of the Mann-Whitney U test.
This code is adapted from SciPy:
https://github.com/scipy/scipy/blob/master/scipy/stats/stats.py
Which is provided under a BSD-style license.
There is also a JavaScript version in Catapult:
https://github.com/catapult-project/catapult/blob/master/tracing/third_party/mannwhitneyu/mannwhitneyu.js
"""
import itertools
import math
def MannWhitneyU(x, y):
    """Computes the Mann-Whitney rank test on samples x and y.

    The distribution of U is approximately normal for large samples. This
    implementation uses the normal approximation, so it's recommended to
    have sample sizes > 20.  Returns the two-sided p-value; raises
    ValueError when every value is identical (tie correction hits zero).
    """
    len_x, len_y = len(x), len(y)
    ranks = _RankData(x + y)
    # U statistic for x from its rank sum; U for y is the complement.
    rank_sum_x = sum(ranks[:len_x])
    u_x = len_x * len_y + len_x * (len_x + 1) / 2.0 - rank_sum_x
    u_y = len_x * len_y - u_x
    tie_factor = _TieCorrectionFactor(ranks)
    if tie_factor == 0:
        raise ValueError('All numbers are identical in mannwhitneyu')
    # Normal approximation with tie-corrected variance and 0.5 continuity
    # correction folded into the mean.
    std_dev = math.sqrt(tie_factor * len_x * len_y * (len_x + len_y + 1) / 12.0)
    z_score = (max(u_x, u_y) - (len_x * len_y / 2.0 + 0.5)) / std_dev
    return 2 * _NormSf(abs(z_score))
def _RankData(a):
"""Assigns ranks to data. Ties are given the mean of the ranks of the items.
This is called "fractional ranking":
https://en.wikipedia.org/wiki/Ranking
"""
sorter = _ArgSortReverse(a)
ranked_min = [0] * len(sorter)
for i, j in reversed(list(enumerate(sorter))):
ranked_min[j] = i
sorter = _ArgSort(a)
ranked_max = [0] * len(sorter)
for i, j in enumerate(sorter):
ranked_max[j] = i
return [1 + (x+y)/2.0 for x, y in zip(ranked_min, ranked_max)]
def _ArgSort(a):
"""Returns the indices that would sort an array.
Ties are given indices in ordinal order."""
return sorted(range(len(a)), key=a.__getitem__)
def _ArgSortReverse(a):
"""Returns the indices that would sort an array.
Ties are given indices in reverse ordinal order."""
return list(reversed(sorted(range(len(a)), key=a.__getitem__, reverse=True)))
def _TieCorrectionFactor(rankvals):
"""Tie correction factor for ties in the Mann-Whitney U test."""
arr = sorted(rankvals)
cnt = [len(list(group)) for _, group in itertools.groupby(arr)]
size = len(arr)
if size < 2:
return 1.0
else:
return 1.0 - sum(x**3 - x for x in cnt) / float(size**3 - size)
def _NormSf(x):
"""Survival function of the standard normal distribution. (1 - cdf)"""
return (1 - math.erf(x/math.sqrt(2))) / 2
|
benschmaus/catapult
|
dashboard/dashboard/pinpoint/mann_whitney_u.py
|
Python
|
bsd-3-clause
| 2,628
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the two per-minute counter tables."""
        # Deleting model 'ProjectCountByMinute'
        db.delete_table(u'sentry_projectcountbyminute')
        # Deleting model 'GroupCountByMinute'
        # (this model's table keeps the historical 'message' name)
        db.delete_table('sentry_messagecountbyminute')
    def backwards(self, orm):
        """Recreate the two rollup tables (with their unique constraints)
        so this migration can be reversed.  Auto-generated by South --
        column tuples below are frozen schema state, do not hand-edit."""
        # Adding model 'ProjectCountByMinute'
        db.create_table(
            u'sentry_projectcountbyminute', (
                (
                    'time_spent_count',
                    self.gf('django.db.models.fields.PositiveIntegerField')(default=0)
                ),
                ('times_seen',
                 self.gf('django.db.models.fields.PositiveIntegerField')(default=0)), (
                     'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(
                         to=orm['sentry.Project'], null=True
                     )
                 ), (
                     'time_spent_total',
                     self.gf('django.db.models.fields.PositiveIntegerField')(default=0)
                 ), ('date', self.gf('django.db.models.fields.DateTimeField')()), (
                     'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
                         primary_key=True
                     )
                 ),
            )
        )
        db.send_create_signal('sentry', ['ProjectCountByMinute'])
        # Adding unique constraint on 'ProjectCountByMinute', fields ['project', 'date']
        db.create_unique(u'sentry_projectcountbyminute', ['project_id', 'date'])
        # Adding model 'GroupCountByMinute'
        db.create_table(
            'sentry_messagecountbyminute', (
                (
                    'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(
                        to=orm['sentry.Project'], null=True
                    )
                ), (
                    'time_spent_total',
                    self.gf('django.db.models.fields.PositiveIntegerField')(default=0)
                ), (
                    'group',
                    self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Group'])
                ), (
                    'time_spent_count',
                    self.gf('django.db.models.fields.PositiveIntegerField')(default=0)
                ), ('date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
                ('times_seen',
                 self.gf('django.db.models.fields.PositiveIntegerField')(default=0)), (
                     'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
                         primary_key=True
                     )
                 ),
            )
        )
        db.send_create_signal('sentry', ['GroupCountByMinute'])
        # Adding unique constraint on 'GroupCountByMinute', fields ['project', 'group', 'date']
        db.create_unique('sentry_messagecountbyminute', ['project_id', 'group_id', 'date'])
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'server_name': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'time_spent': ('django.db.models.fields.IntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments':
('django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'null': 'True'
}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'),)",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']",
'null': 'True'
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'user_added': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'keys_added_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'team_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.TeamMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_teammember_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
looker/sentry
|
src/sentry/south_migrations/0115_auto__del_projectcountbyminute__del_unique_projectcountbyminute_projec.py
|
Python
|
bsd-3-clause
| 36,065
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-15 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``userMainTeble`` model."""
    # First migration of this app, so it depends on nothing.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            # NOTE(review): 'userMainTeble' and 'fild_one' look like typos for
            # 'userMainTable' / 'field_one', but migration names are frozen
            # history -- renaming requires a follow-up migration, not an edit here.
            name='userMainTeble',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fild_one', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='Дата публикации')),
            ],
        ),
    ]
|
Necromoshka/orderHistory
|
journal/userMain/migrations/0001_initial.py
|
Python
|
mit
| 660
|
import shutil
import os

# Empty the "js" output directory: delete every file and subdirectory inside
# it while leaving the directory itself in place.
for entity in os.listdir("js"):
    path = os.path.abspath(os.path.join("js", entity))
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        os.remove(path)

# print() with a single argument behaves identically on Python 2 and 3,
# unlike the original Python 2-only `print ...` statement syntax.
print("Cleaned JS output directory")
|
Adam01/Cylinder
|
clean_js.py
|
Python
|
mit
| 263
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Imports values from task 12 to test equality.

.. hint::

    You can access task_12 data in the following example type:

    .. code:: python

        print task_12.FLOATVAL

"""
import task_12

# NOTE(review): names suggest DECVAL (Decimal) and FRACVAL (Fraction) encode
# the same exact value, while FLOATVAL is the inexact binary float -- confirm
# against task_12.
FRAC_DEC_EQUAL = (task_12.DECVAL == task_12.FRACVAL)
DEC_FLOAT_INEQUAL = (task_12.DECVAL != task_12.FLOATVAL)

# Parenthesized single-argument print is identical on Python 2 and 3.
print(FRAC_DEC_EQUAL)
print(DEC_FLOAT_INEQUAL)
|
rrafiringa/is210-week-03-warmup
|
task_13.py
|
Python
|
mpl-2.0
| 413
|
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from . import configurator, usb_camera, genicam
|
ricotabor/opendrop
|
opendrop/app/common/image_acquisition/configurator/__init__.py
|
Python
|
gpl-2.0
| 1,447
|
"""
App wide event registry
Everything in the application is communicated via pubsub. These are the events
that tie everything together.
"""
import wx # type: ignore
# Each constant gets its own unique wx control id, so widgets can bind to
# and publish a specific application event.
# Window lifecycle / navigation events.
WINDOW_STOP = wx.Window.NewControlId()
WINDOW_CANCEL = wx.Window.NewControlId()
WINDOW_CLOSE = wx.Window.NewControlId()
WINDOW_START = wx.Window.NewControlId()
WINDOW_RESTART = wx.Window.NewControlId()
WINDOW_EDIT = wx.Window.NewControlId()
WINDOW_CHANGE = wx.Window.NewControlId()
# Panel / widget events.
PANEL_CHANGE = wx.Window.NewControlId()
LIST_BOX = wx.Window.NewControlId()
# Runtime / execution progress events.
CONSOLE_UPDATE = wx.Window.NewControlId()
EXECUTION_COMPLETE = wx.Window.NewControlId()
PROGRESS_UPDATE = wx.Window.NewControlId()
TIME_UPDATE = wx.Window.NewControlId()
USER_INPUT = wx.Window.NewControlId()
LEFT_DOWN = wx.Window.NewControlId()
chriskiehl/Gooey
|
gooey/gui/events.py
|
Python
|
mit
| 840
|
# -*- coding: utf-8 -*-
"""
Miscellaneous utility code for VAR estimation
"""
from statsmodels.compat.pandas import frequencies
from statsmodels.compat.python import asbytes
import numpy as np
import pandas as pd
import scipy.linalg.decomp as decomp
import scipy.stats as stats
import statsmodels.tsa.tsatools as tsa
#-------------------------------------------------------------------------------
# Auxiliary functions for estimation
def get_var_endog(y, lags, trend='c', has_constant='skip'):
    """
    Make predictor matrix for VAR(p) process

    Z := (Z_0, ..., Z_T).T (T x Kp)
    Z_t = [1 y_t y_{t-1} ... y_{t - p + 1}] (Kp x 1)

    Ref: Lütkepohl p.70 (transposed)

    has_constant can be 'raise', 'add', or 'skip'. See add_constant.
    """
    nobs = len(y)
    # Each predictor row stacks the `lags` most recent observations,
    # newest observation first (ravel in C order after reversing the window).
    window_rows = []
    for t in range(lags, nobs):
        window_rows.append(y[t - lags:t][::-1].ravel())
    Z = np.array(window_rows)
    if trend != 'nc':
        # Prepend the requested deterministic columns (constant/trend).
        Z = tsa.add_trend(Z, prepend=True, trend=trend,
                          has_constant=has_constant)
    return Z
def get_trendorder(trend='c'):
    """Map a trend specification string to its number of deterministic terms.

    Parameters
    ----------
    trend : str
        'c' (constant), 'n' or 'nc' (none), 'ct' (constant + linear trend),
        'ctt' (constant + linear + quadratic trend).

    Returns
    -------
    int
        Number of deterministic trend terms.

    Raises
    ------
    ValueError
        If `trend` is not recognized.  (Previously an unrecognized value fell
        through all branches and raised ``UnboundLocalError`` on return.)
    """
    # Handle constant, etc.
    if trend == 'c':
        trendorder = 1
    elif trend in ('n', 'nc'):
        trendorder = 0
    elif trend == 'ct':
        trendorder = 2
    elif trend == 'ctt':
        trendorder = 3
    else:
        raise ValueError("unrecognized trend specification: %r" % (trend,))
    return trendorder
def make_lag_names(names, lag_order, trendorder=1, exog=None):
    """
    Produce list of lag-variable names. Constant / trends go at the beginning

    Examples
    --------
    >>> make_lag_names(['foo', 'bar'], 2, 1)
    ['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']
    """
    if isinstance(names, str):
        names = [names]
    # Lagged endogenous names in lag-major order: L1.<names>, L2.<names>, ...
    lag_names = ['L%d.%s' % (lag, str(name))
                 for lag in range(1, lag_order + 1)
                 for name in names]
    # Deterministic terms go in front of the lag names.
    if trendorder != 0:
        lag_names.insert(0, 'const')
    if trendorder > 1:
        lag_names.insert(1, 'trend')
    if trendorder > 2:
        lag_names.insert(2, 'trend**2')
    # Exogenous variable names follow the deterministic terms.
    if exog is not None:
        if isinstance(exog, pd.Series):
            exog = pd.DataFrame(exog)
        elif not hasattr(exog, 'ndim'):
            exog = np.asarray(exog)
        if exog.ndim == 1:
            exog = exog[:, None]
        for col in range(exog.shape[1]):
            if isinstance(exog, pd.DataFrame):
                exog_name = str(exog.columns[col])
            else:
                exog_name = 'exog' + str(col)
            lag_names.insert(trendorder + col, exog_name)
    return lag_names
def comp_matrix(coefs):
    """
    Return compansion matrix for the VAR(1) representation for a VAR(p) process
    (companion form)

    A = [A_1 A_2 ... A_p-1 A_p
         I_K 0       0     0
         0   I_K ... 0     0
         0 ...       I_K   0]
    """
    p, k1, k2 = coefs.shape
    if k1 != k2:
        raise ValueError('coefs must be 3-d with shape (p, k, k).')
    n = k1 * p
    companion = np.zeros((n, n))
    # Top block row holds the lag coefficient matrices side by side.
    companion[:k1] = np.hstack(list(coefs))
    # Sub-diagonal identity blocks shift the state vector down one lag.
    if p > 1:
        rows = np.arange(k1, n)
        companion[rows, rows - k1] = 1
    return companion
#-------------------------------------------------------------------------------
# Miscellaneous stuff
def parse_lutkepohl_data(path): # pragma: no cover
    """
    Parse data files from Lütkepohl (2005) book

    Source for data files: www.jmulti.de

    Returns the observations as a recarray together with the matching
    pandas date range inferred from the file header.
    """
    from collections import deque
    from datetime import datetime
    import re
    # Header line like b'<description Q1>': captures description, the
    # frequency letter (Q/M/A) and the starting period number, as bytes.
    regex = re.compile(asbytes(r'<(.*) (\w)([\d]+)>.*'))
    with open(path, 'rb') as f:
        lines = deque(f)
    to_skip = 0
    # Skip the comment block that ends with '*/' at the top of the file.
    while asbytes('*/') not in lines.popleft():
        #while '*/' not in lines.popleft():
        to_skip += 1
    # Keep consuming lines until the header line matches the regex.
    while True:
        to_skip += 1
        line = lines.popleft()
        m = regex.match(line)
        if m:
            year, freq, start_point = m.groups()
            break
    # The remaining rows are whitespace-separated observations.
    data = (pd.read_csv(path, delimiter=r"\s+", header=to_skip+1)
            .to_records(index=False))
    n = len(data)
    # generate the corresponding date range (using pandas for now)
    start_point = int(start_point)
    year = int(year)
    # Business-period offsets keyed by the frequency byte from the header.
    offsets = {
        asbytes('Q'): frequencies.BQuarterEnd(),
        asbytes('M'): frequencies.BMonthEnd(),
        asbytes('A'): frequencies.BYearEnd()
    }
    # create an instance
    offset = offsets[freq]
    # Advance start_point - 1 periods past the first period of `year`.
    inc = offset * (start_point - 1)
    start_date = offset.rollforward(datetime(year, 1, 1)) + inc
    offset = offsets[freq]
    date_range = pd.date_range(start=start_date, freq=offset, periods=n)
    return data, date_range
def norm_signif_level(alpha=0.05):
    """Two-sided standard-normal critical value for significance level alpha."""
    upper_tail = alpha / 2
    return stats.norm.ppf(1 - upper_tail)
def acf_to_acorr(acf):
    """Normalize autocovariance matrices to autocorrelations.

    Divides every lag's matrix elementwise by sqrt(var_i * var_j), where the
    variances are the diagonal of the lag-0 matrix.
    """
    diag = np.diag(acf[0])
    # numpy broadcasting divides all lags by the same (k x k) denominator
    return acf / np.sqrt(np.outer(diag, diag))
def varsim(coefs, intercept, sig_u, steps=100, initvalues=None, seed=None):
    """
    Simulate VAR(p) process, given coefficients and assuming Gaussian noise

    Parameters
    ----------
    coefs : ndarray
        Coefficients for the VAR lags of endog.
    intercept : None or ndarray 1-D (neqs,) or (steps, neqs)
        This can be either the intercept for each equation or an offset.
        If None, then the VAR process has a zero intercept.
        If intercept is 1-D, then the same (endog specific) intercept is added
        to all observations.
        If intercept is 2-D, then it is treated as an offset and is added as
        an observation specific intercept to the autoregression. In this case,
        the intercept/offset should have same number of rows as steps, and the
        same number of columns as endogenous variables (neqs).
    sig_u : ndarray
        Covariance matrix of the residuals or innovations.
        If sig_u is None, then an identity matrix is used.
    steps : {None, int}
        number of observations to simulate, this includes the initial
        observations to start the autoregressive process.
        If offset is not None, then exog of the model are used if they were
        provided in the model
    initvalues : None
        NOTE(review): accepted but never used below -- the first `p` rows are
        always zero (plus intercept) rather than caller-supplied start values.
    seed : {None, int}
        If seed is not None, then it will be used with for the random
        variables generated by numpy.random.

    Returns
    -------
    endog_simulated : nd_array
        Endog of the simulated VAR process
    """
    # Dedicated RandomState so a given seed reproduces identical draws.
    rs = np.random.RandomState(seed=seed)
    rmvnorm = rs.multivariate_normal
    # coefs is (lags, neqs, neqs); `k` is intentionally bound twice -- both
    # trailing dimensions must agree.
    p, k, k = coefs.shape
    if sig_u is None:
        sig_u = np.eye(k)
    # Draw all `steps` innovation vectors up front.
    ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps)
    result = np.zeros((steps, k))
    if intercept is not None:
        # intercept can be 2-D like an offset variable
        if np.ndim(intercept) > 1:
            if not len(intercept) == len(ugen):
                raise ValueError('2-D intercept needs to have length `steps`')
        # add intercept/offset also to intial values
        result += intercept
        result[p:] += ugen[p:]
    else:
        result[p:] = ugen[p:]
    # add in AR terms
    for t in range(p, steps):
        # ygen is a view into `result`, so += writes back in place.
        ygen = result[t]
        for j in range(p):
            ygen += np.dot(coefs[j], result[t-j-1])
    return result
def get_index(lst, name):
    """Return the position of `name` in `lst`; if lookup fails and `name`
    is already an integer index, return it unchanged."""
    try:
        return lst.index(name)
    except Exception:
        # Only swallow the failure when `name` can serve as an index itself.
        if isinstance(name, int):
            return name
        raise
#method used repeatedly in Sims-Zha error bands
def eigval_decomp(sym_array):
    """Left-eigenvector decomposition (used repeatedly for Sims-Zha bands).

    Returns
    -------
    W: array of eigenvectors
    eigva: list of eigenvalues
    k: largest eigenvector
    """
    #check if symmetric, do not include shock period
    values, left_vectors = decomp.eig(sym_array, left=True, right=False)
    dominant = np.argmax(values)
    return left_vectors, values, dominant
def vech(A):
    """
    Simple vech operator

    Returns
    -------
    vechvec: vector of all elements on and below diagonal
    """
    # Stack the on-and-below-diagonal part column by column; the column
    # count A.shape[1] also bounds the rows, matching the original loop.
    n = A.shape[1]
    columns = [A[col:n, col] for col in range(n)]
    if columns:
        return np.concatenate(columns)
    return np.asarray([])
def seasonal_dummies(n_seasons, len_endog, first_period=0, centered=False):
    """
    Compute seasonal dummy variables.

    Parameters
    ----------
    n_seasons : int >= 0
        Number of seasons (e.g. 12 for monthly data and 4 for quarterly data).
    len_endog : int >= 0
        Total number of observations.
    first_period : int, default: 0
        Season of the first observation. As an example, suppose we have monthly
        data and the first observation is in March (third month of the year).
        In this case we pass 2 as first_period. (0 for the first season,
        1 for the second, ..., n_seasons-1 for the last season).
        An integer greater than n_seasons-1 is treated in the same way as the
        integer modulo n_seasons.
    centered : bool, default: False
        If True, center (demean) the dummy variables. That is useful in order
        to get seasonal dummies that are orthogonal to the vector of constant
        dummy variables (a vector of ones).

    Returns
    -------
    seasonal_dummies : ndarray (len_endog x n_seasons-1)

    Raises
    ------
    ValueError
        If n_seasons is negative. (Previously a negative value fell through
        both branches and the function silently returned None.)
    """
    if n_seasons < 0:
        raise ValueError("n_seasons must be a non-negative integer")
    if n_seasons == 0:
        return np.empty((len_endog, 0))
    season_exog = np.zeros((len_endog, n_seasons - 1))
    for i in range(n_seasons - 1):
        # Mark every n_seasons-th observation as belonging to season i,
        # shifted so the first row corresponds to first_period.
        season_exog[(i - first_period) % n_seasons::n_seasons, i] = 1
    if centered:
        season_exog -= 1 / n_seasons
    return season_exog
|
jseabold/statsmodels
|
statsmodels/tsa/vector_ar/util.py
|
Python
|
bsd-3-clause
| 9,611
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 5 08:58:46 2015
@author: msutton1

Secret-Santa assignment script (Python 2: uses print statements).
Reads the participant list from SecretSanta.xlsx and randomly assigns each
person a recipient, rejecting any draw where giver and recipient share the
same 'couple' value or the recipient is already taken.
"""
import pandas as pd
import random as rd
# NOTE(review): 'sheetname' is the old pandas keyword (renamed to
# 'sheet_name' in later pandas versions) -- confirm the pandas version in use.
namesList = pd.read_excel('SecretSanta.xlsx', sheetname = 'list')
i=0
usedlist = []  # row indices already handed out as recipients
length = len(namesList.index)
# Rejection sampling: keep re-drawing until person i gets a valid recipient.
# NOTE(review): this can loop forever if the only remaining recipients are
# in the same couple as the remaining givers -- there is no restart logic.
while (i<length):
    rand = rd.randint(0,length-1)
    # rand == i is rejected too, since a row trivially matches its own couple.
    if namesList.couple[i] == namesList.couple[rand] or rand in usedlist:
        continue
    else:
        # NOTE(review): chained-indexing assignment; pandas may emit a
        # SettingWithCopy warning and the write is not guaranteed -- verify.
        namesList.recipient[i] = namesList.Recipientlist[rand]
        usedlist.append(rand)
        i=i+1
print namesList
print usedlist
|
MarshallSutton/Secret-Santa
|
SecretSanta.py
|
Python
|
cc0-1.0
| 568
|
import math
# Python 2 script (raw_input / print statement): reads T test cases and
# prints the floor of the square root of each N.
T = int(raw_input())
for i in range(T):
    N = int(raw_input())
    # NOTE(review): float sqrt can be off by one for very large N due to
    # double precision -- confirm the problem constraints tolerate this.
    print int(math.sqrt(N))
|
rohit91/codechef
|
codechef/FSQRT.py
|
Python
|
gpl-2.0
| 107
|
# Star-import so this module exposes the same public names as
# pyramid.response (a thin re-export shim).
from pyramid.response import *
|
ztane/tet
|
tet/response.py
|
Python
|
bsd-3-clause
| 32
|
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for the Credentials Python bindings.
Note that this just tests the bindings work. It does not intend to test
the functionality, that's already done in other tests.
"""
from samba import credentials
import samba.tests
import os
import binascii
from samba.dcerpc import misc
class CredentialsTests(samba.tests.TestCaseInTempDir):
    """Exercise the samba.credentials Python bindings.

    Each test gets a fresh Credentials object from setUp; tests that need
    loadparm-driven defaults build their own object and call guess().
    """
    def setUp(self):
        super(CredentialsTests, self).setUp()
        # Fresh Credentials object for every test.
        self.creds = credentials.Credentials()
    def test_set_username(self):
        self.creds.set_username("somebody")
        self.assertEqual("somebody", self.creds.get_username())
    def test_set_password(self):
        self.creds.set_password("S3CreT")
        self.assertEqual("S3CreT", self.creds.get_password())
    def test_set_utf16_password(self):
        password = 'S3cRet'
        passbytes = password.encode('utf-16-le')
        self.assertTrue(self.creds.set_utf16_password(passbytes))
        self.assertEqual(password, self.creds.get_password())
    def test_set_old_password(self):
        self.assertEqual(None, self.creds.get_old_password())
        self.assertTrue(self.creds.set_old_password("S3c0ndS3CreT"))
        self.assertEqual("S3c0ndS3CreT", self.creds.get_old_password())
    def test_set_old_utf16_password(self):
        password = '0ldS3cRet'
        passbytes = password.encode('utf-16-le')
        self.assertTrue(self.creds.set_old_utf16_password(passbytes))
        self.assertEqual(password, self.creds.get_old_password())
    def test_set_domain(self):
        self.creds.set_domain("ABMAS")
        self.assertEqual("ABMAS", self.creds.get_domain())
        self.assertEqual(self.creds.get_principal(), None)
    def test_set_realm(self):
        self.creds.set_realm("myrealm")
        self.assertEqual("MYREALM", self.creds.get_realm())
        self.assertEqual(self.creds.get_principal(), None)
    def test_parse_string_anon(self):
        self.creds.parse_string("%")
        self.assertEqual("", self.creds.get_username())
        self.assertEqual(None, self.creds.get_password())
    def test_parse_string_empty_pw(self):
        self.creds.parse_string("someone%")
        self.assertEqual("someone", self.creds.get_username())
        self.assertEqual("", self.creds.get_password())
    def test_parse_string_none_pw(self):
        self.creds.parse_string("someone")
        self.assertEqual("someone", self.creds.get_username())
        self.assertEqual(None, self.creds.get_password())
    def test_parse_string_user_pw_domain(self):
        self.creds.parse_string("dom\\someone%secr")
        self.assertEqual("someone", self.creds.get_username())
        self.assertEqual("secr", self.creds.get_password())
        self.assertEqual("DOM", self.creds.get_domain())
    def test_bind_dn(self):
        self.assertEqual(None, self.creds.get_bind_dn())
        self.creds.set_bind_dn("dc=foo,cn=bar")
        self.assertEqual("dc=foo,cn=bar", self.creds.get_bind_dn())
    def test_is_anon(self):
        self.creds.set_username("")
        self.assertTrue(self.creds.is_anonymous())
        self.creds.set_username("somebody")
        self.assertFalse(self.creds.is_anonymous())
        self.creds.set_anonymous()
        self.assertTrue(self.creds.is_anonymous())
    def test_workstation(self):
        # FIXME: This is uninitialised, it should be None
        #self.assertEqual(None, self.creds.get_workstation())
        self.creds.set_workstation("myworksta")
        self.assertEqual("myworksta", self.creds.get_workstation())
    def test_secure_channel_type(self):
        self.assertEqual(misc.SEC_CHAN_NULL,
                         self.creds.get_secure_channel_type())
        self.creds.set_secure_channel_type(misc.SEC_CHAN_BDC)
        self.assertEqual(misc.SEC_CHAN_BDC,
                         self.creds.get_secure_channel_type())
    def test_get_nt_hash(self):
        password = "geheim"
        hex_nthash = "c2ae1fe6e648846352453e816f2aeb93"
        self.creds.set_password(password)
        self.assertEqual(password, self.creds.get_password())
        self.assertEqual(binascii.a2b_hex(hex_nthash),
                         self.creds.get_nt_hash())
    def test_get_ntlm_response(self):
        # Fixed hex values below are the expected-output test vectors.
        password = "SecREt01"
        hex_challenge = "0123456789abcdef"
        hex_nthash = "cd06ca7c7e10c99b1d33b7485a2ed808"
        hex_session_key = "3f373ea8e4af954f14faa506f8eebdc4"
        hex_ntlm_response = "25a98c1c31e81847466b29b2df4680f39958fb8c213a9cc6"
        self.creds.set_username("fred")
        self.creds.set_domain("nurk")
        self.creds.set_password(password)
        self.assertEqual(password, self.creds.get_password())
        self.assertEqual(binascii.a2b_hex(hex_nthash),
                         self.creds.get_nt_hash())
        response = self.creds.get_ntlm_response(flags=credentials.CLI_CRED_NTLM_AUTH,
                                                challenge=binascii.a2b_hex(hex_challenge))
        self.assertEqual(response["nt_response"], binascii.a2b_hex(hex_ntlm_response))
        self.assertEqual(response["nt_session_key"], binascii.a2b_hex(hex_session_key))
        self.assertEqual(response["flags"], credentials.CLI_CRED_NTLM_AUTH)
    def test_get_nt_hash_string(self):
        self.creds.set_password_will_be_nt_hash(True)
        hex_nthash = "c2ae1fe6e648846352453e816f2aeb93"
        self.creds.set_password(hex_nthash)
        self.assertEqual(None, self.creds.get_password())
        self.assertEqual(binascii.a2b_hex(hex_nthash),
                         self.creds.get_nt_hash())
    def test_set_cmdline_callbacks(self):
        self.creds.set_cmdline_callbacks()
    def test_authentication_requested(self):
        self.creds.set_username("")
        self.assertFalse(self.creds.authentication_requested())
        self.creds.set_username("somebody")
        self.assertTrue(self.creds.authentication_requested())
    def test_wrong_password(self):
        self.assertFalse(self.creds.wrong_password())
    def test_guess(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        self.assertEqual(creds.get_username(), "env_user")
        self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
        self.assertEqual(creds.get_realm(), None)
        self.assertEqual(creds.get_principal(), "env_user@%s" % creds.get_domain())
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), False)
    def test_set_anonymous(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        creds.set_anonymous()
        self.assertEqual(creds.get_username(), "")
        self.assertEqual(creds.get_domain(), "")
        self.assertEqual(creds.get_realm(), None)
        self.assertEqual(creds.get_principal(), None)
        self.assertEqual(creds.is_anonymous(), True)
        self.assertEqual(creds.authentication_requested(), False)
    # The parse_file tests below read credentials from a key=value file
    # written into the per-test temporary directory.
    def test_parse_file_1(self):
        realm = "realm.example.com"
        domain = "dom"
        password = "pass"
        username = "user"
        passwd_file_name = os.path.join(self.tempdir, "parse_file")
        passwd_file_fd = open(passwd_file_name, 'x')
        passwd_file_fd.write("realm=%s\n" % realm)
        passwd_file_fd.write("domain=%s\n" % domain)
        passwd_file_fd.write("username=%s\n" % username)
        passwd_file_fd.write("password=%s\n" % password)
        passwd_file_fd.close()
        self.creds.parse_file(passwd_file_name)
        self.assertEqual(self.creds.get_username(), username)
        self.assertEqual(self.creds.get_password(), password)
        self.assertEqual(self.creds.get_domain(), domain.upper())
        self.assertEqual(self.creds.get_realm(), realm.upper())
        self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, realm.upper()))
        self.assertEqual(self.creds.is_anonymous(), False)
        self.assertEqual(self.creds.authentication_requested(), True)
        os.unlink(passwd_file_name)
    def test_parse_file_2(self):
        realm = "realm.example.com"
        domain = "dom"
        password = "pass"
        username = "user"
        passwd_file_name = os.path.join(self.tempdir, "parse_file")
        passwd_file_fd = open(passwd_file_name, 'x')
        passwd_file_fd.write("realm=%s\n" % realm)
        passwd_file_fd.write("domain=%s\n" % domain)
        passwd_file_fd.write("username=%s\\%s\n" % (domain, username))
        passwd_file_fd.write("password=%s\n" % password)
        passwd_file_fd.close()
        self.creds.parse_file(passwd_file_name)
        self.assertEqual(self.creds.get_username(), username)
        self.assertEqual(self.creds.get_password(), password)
        self.assertEqual(self.creds.get_domain(), domain.upper())
        self.assertEqual(self.creds.get_realm(), realm.upper())
        self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, realm.upper()))
        self.assertEqual(self.creds.is_anonymous(), False)
        self.assertEqual(self.creds.authentication_requested(), True)
        os.unlink(passwd_file_name)
    def test_parse_file_3(self):
        realm = "realm.example.com"
        domain = "domain"
        password = "password"
        username = "username"
        userdom = "userdom"
        passwd_file_name = os.path.join(self.tempdir, "parse_file")
        passwd_file_fd = open(passwd_file_name, 'x')
        passwd_file_fd.write("realm=%s\n" % realm)
        passwd_file_fd.write("domain=%s\n" % domain)
        passwd_file_fd.write("username=%s/%s\n" % (userdom, username))
        passwd_file_fd.write("password=%s\n" % password)
        passwd_file_fd.close()
        self.creds.parse_file(passwd_file_name)
        self.assertEqual(self.creds.get_username(), username)
        self.assertEqual(self.creds.get_password(), password)
        self.assertEqual(self.creds.get_domain(), userdom.upper())
        self.assertEqual(self.creds.get_realm(), userdom.upper())
        self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, userdom.upper()))
        self.assertEqual(self.creds.is_anonymous(), False)
        self.assertEqual(self.creds.authentication_requested(), True)
        os.unlink(passwd_file_name)
    def test_parse_file_4(self):
        password = "password"
        username = "username"
        userdom = "userdom"
        passwd_file_name = os.path.join(self.tempdir, "parse_file")
        passwd_file_fd = open(passwd_file_name, 'x')
        passwd_file_fd.write("username=%s\\%s%%%s\n" % (userdom, username, password))
        passwd_file_fd.write("realm=ignorerealm\n")
        passwd_file_fd.write("domain=ignoredomain\n")
        passwd_file_fd.write("password=ignorepassword\n")
        passwd_file_fd.close()
        self.creds.parse_file(passwd_file_name)
        self.assertEqual(self.creds.get_username(), username)
        self.assertEqual(self.creds.get_password(), password)
        self.assertEqual(self.creds.get_domain(), userdom.upper())
        self.assertEqual(self.creds.get_realm(), userdom.upper())
        self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, userdom.upper()))
        self.assertEqual(self.creds.is_anonymous(), False)
        self.assertEqual(self.creds.authentication_requested(), True)
        os.unlink(passwd_file_name)
    def test_parse_file_5(self):
        password = "password"
        username = "username"
        userdom = "userdom"
        passwd_file_name = os.path.join(self.tempdir, "parse_file")
        passwd_file_fd = open(passwd_file_name, 'x')
        passwd_file_fd.write("realm=ignorerealm\n")
        passwd_file_fd.write("username=%s\\%s%%%s\n" % (userdom, username, password))
        passwd_file_fd.write("domain=ignoredomain\n")
        passwd_file_fd.write("password=ignorepassword\n")
        passwd_file_fd.close()
        self.creds.parse_file(passwd_file_name)
        self.assertEqual(self.creds.get_username(), username)
        self.assertEqual(self.creds.get_password(), password)
        self.assertEqual(self.creds.get_domain(), userdom.upper())
        self.assertEqual(self.creds.get_realm(), userdom.upper())
        self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, userdom.upper()))
        self.assertEqual(self.creds.is_anonymous(), False)
        self.assertEqual(self.creds.authentication_requested(), True)
        os.unlink(passwd_file_name)
    def test_parse_username_0(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        creds.parse_string("user")
        self.assertEqual(creds.get_username(), "user")
        self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
        self.assertEqual(creds.get_realm(), None)
        self.assertEqual(creds.get_principal(), "user@%s" % lp.get("workgroup").upper())
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_1(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        realm = "realm.example.com"
        creds.set_realm(realm, credentials.SMB_CONF)
        creds.parse_string("user")
        self.assertEqual(creds.get_username(), "user")
        self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
        self.assertEqual(creds.get_realm(), realm.upper())
        self.assertEqual(creds.get_principal(), "user@%s" % realm.upper())
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_with_domain_0(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        creds.parse_string("domain\\user")
        self.assertEqual(creds.get_username(), "user")
        self.assertEqual(creds.get_domain(), "DOMAIN")
        self.assertEqual(creds.get_realm(), None)
        self.assertEqual(creds.get_principal(), "user@DOMAIN")
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_with_domain_1(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        realm = "realm.example.com"
        creds.set_realm(realm, credentials.SMB_CONF)
        self.assertEqual(creds.get_username(), "env_user")
        self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
        self.assertEqual(creds.get_realm(), realm.upper())
        self.assertEqual(creds.get_principal(), "env_user@%s" % realm.upper())
        creds.set_principal("unknown@realm.example.com")
        self.assertEqual(creds.get_username(), "env_user")
        self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
        self.assertEqual(creds.get_realm(), realm.upper())
        self.assertEqual(creds.get_principal(), "unknown@realm.example.com")
        creds.parse_string("domain\\user")
        self.assertEqual(creds.get_username(), "user")
        self.assertEqual(creds.get_domain(), "DOMAIN")
        self.assertEqual(creds.get_realm(), realm.upper())
        self.assertEqual(creds.get_principal(), "user@DOMAIN")
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_with_domain_2(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        realm = "realm.example.com"
        creds.set_realm(realm, credentials.SPECIFIED)
        self.assertEqual(creds.get_username(), "env_user")
        self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
        self.assertEqual(creds.get_realm(), realm.upper())
        self.assertEqual(creds.get_principal(), "env_user@%s" % realm.upper())
        creds.set_principal("unknown@realm.example.com")
        self.assertEqual(creds.get_username(), "env_user")
        self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
        self.assertEqual(creds.get_realm(), realm.upper())
        self.assertEqual(creds.get_principal(), "unknown@realm.example.com")
        creds.parse_string("domain\\user")
        self.assertEqual(creds.get_username(), "user")
        self.assertEqual(creds.get_domain(), "DOMAIN")
        self.assertEqual(creds.get_realm(), "DOMAIN")
        self.assertEqual(creds.get_principal(), "user@DOMAIN")
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_with_realm(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        creds.parse_string("user@samba.org")
        self.assertEqual(creds.get_username(), "user@samba.org")
        self.assertEqual(creds.get_domain(), "")
        self.assertEqual(creds.get_realm(), "SAMBA.ORG")
        self.assertEqual(creds.get_principal(), "user@samba.org")
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_pw(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        creds.parse_string("user%pass")
        self.assertEqual(creds.get_username(), "user")
        self.assertEqual(creds.get_password(), "pass")
        self.assertEqual(creds.get_domain(), lp.get("workgroup"))
        self.assertEqual(creds.get_realm(), None)
        self.assertEqual(creds.get_principal(), "user@%s" % lp.get("workgroup"))
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_with_domain_pw(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        creds.parse_string("domain\\user%pass")
        self.assertEqual(creds.get_username(), "user")
        self.assertEqual(creds.get_domain(), "DOMAIN")
        self.assertEqual(creds.get_password(), "pass")
        self.assertEqual(creds.get_realm(), None)
        self.assertEqual(creds.get_principal(), "user@DOMAIN")
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_parse_username_with_realm_pw(self):
        creds = credentials.Credentials()
        lp = samba.tests.env_loadparm()
        os.environ["USER"] = "env_user"
        creds.guess(lp)
        creds.parse_string("user@samba.org%pass")
        self.assertEqual(creds.get_username(), "user@samba.org")
        self.assertEqual(creds.get_domain(), "")
        self.assertEqual(creds.get_password(), "pass")
        self.assertEqual(creds.get_realm(), "SAMBA.ORG")
        self.assertEqual(creds.get_principal(), "user@samba.org")
        self.assertEqual(creds.is_anonymous(), False)
        self.assertEqual(creds.authentication_requested(), True)
    def test_smb_signing(self):
        creds = credentials.Credentials()
        self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_DEFAULT)
        creds.set_smb_signing(credentials.SMB_SIGNING_REQUIRED)
        self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_REQUIRED)
    def test_smb_signing_set_conf(self):
        lp = samba.tests.env_loadparm()
        creds = credentials.Credentials()
        creds.set_conf(lp)
        self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_DEFAULT)
        creds.set_smb_signing(credentials.SMB_SIGNING_OFF)
        self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_OFF)
        creds.set_conf(lp)
        self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_OFF)
    def test_smb_ipc_signing(self):
        creds = credentials.Credentials()
        self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_REQUIRED)
        creds.set_smb_ipc_signing(credentials.SMB_SIGNING_OFF)
        self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_OFF)
    def test_smb_ipc_signing_set_conf(self):
        lp = samba.tests.env_loadparm()
        creds = credentials.Credentials()
        creds.set_conf(lp)
        self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_REQUIRED)
        creds.set_smb_ipc_signing(credentials.SMB_SIGNING_OFF)
        self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_OFF)
        creds.set_conf(lp)
        self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_OFF)
    def test_smb_encryption(self):
        creds = credentials.Credentials()
        self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_DEFAULT)
        creds.set_smb_encryption(credentials.SMB_ENCRYPTION_REQUIRED)
        self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_REQUIRED)
    def test_smb_encryption_set_conf(self):
        lp = samba.tests.env_loadparm()
        creds = credentials.Credentials()
        creds.set_conf(lp)
        self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_DEFAULT)
        creds.set_smb_encryption(credentials.SMB_ENCRYPTION_OFF)
        self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_OFF)
        creds.set_conf(lp)
        self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_OFF)
|
kernevil/samba
|
python/samba/tests/credentials.py
|
Python
|
gpl-3.0
| 22,660
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import log as logging
from rally import consts
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.task import types
from rally.task import validation
LOG = logging.getLogger(__name__)
class CinderVolumes(cinder_utils.CinderScenario,
nova_utils.NovaScenario,
glance_utils.GlanceScenario):
"""Benchmark scenarios for Cinder Volumes."""
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_volume(self, size, detailed=True,
image=None, **kwargs):
"""Create a volume and list all volumes.
Measure the "cinder volume-list" command performance.
If you have only 1 user in your context, you will
add 1 volume on every iteration. So you will have more
and more volumes and will be able to measure the
performance of the "cinder volume-list" command depending on
the number of images owned by users.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param detailed: determines whether the volume listing should contain
detailed information about all of them
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
self._list_volumes(detailed)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def list_volumes(self, detailed=True):
"""List all volumes.
This simple scenario tests the cinder list command by listing
all the volumes.
:param detailed: True if detailed information about volumes
should be listed
"""
self._list_volumes(detailed)
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_update_volume(self, size, image=None,
create_volume_kwargs=None,
update_volume_kwargs=None):
"""Create a volume and update its name and description.
:param size: volume size (integer, in GB)
:param image: image to be used to create volume
:param create_volume_kwargs: dict, to be used to create volume
:param update_volume_kwargs: dict, to be used to update volume
"""
create_volume_kwargs = create_volume_kwargs or {}
update_volume_kwargs = update_volume_kwargs or {}
if image:
create_volume_kwargs["imageRef"] = image
volume = self._create_volume(size, **create_volume_kwargs)
self._update_volume(volume, **update_volume_kwargs)
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_delete_volume(self, size, image=None,
min_sleep=0, max_sleep=0,
**kwargs):
"""Create and then delete a volume.
Good for testing a maximal bandwidth of cloud. Optional 'min_sleep'
and 'max_sleep' parameters allow the scenario to simulate a pause
between volume creation and deletion (of random duration from
[min_sleep, max_sleep]).
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
volume = self._create_volume(size, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_volume(self, size, image=None, **kwargs):
"""Create a volume.
Good test to check how influence amount of active volumes on
performance of creating new.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@validation.required_contexts("volumes")
@scenario.configure(context={"cleanup": ["cinder"]})
def modify_volume_metadata(self, sets=10, set_size=3,
deletes=5, delete_size=3):
"""Modify a volume's metadata.
This requires a volume to be created with the volumes
context. Additionally, ``sets * set_size`` must be greater
than or equal to ``deletes * delete_size``.
:param sets: how many set_metadata operations to perform
:param set_size: number of metadata keys to set in each
set_metadata operation
:param deletes: how many delete_metadata operations to perform
:param delete_size: number of metadata keys to delete in each
delete_metadata operation
"""
if sets * set_size < deletes * delete_size:
raise exceptions.InvalidArgumentsException(
"Not enough metadata keys will be created: "
"Setting %(num_keys)s keys, but deleting %(num_deletes)s" %
{"num_keys": sets * set_size,
"num_deletes": deletes * delete_size})
volume = random.choice(self.context["tenant"]["volumes"])
keys = self._set_metadata(volume["id"], sets, set_size)
self._delete_metadata(volume["id"], keys, deletes, delete_size)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_extend_volume(self, size, new_size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and extend a volume and then delete it.
:param size: volume size (in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param new_size: volume new size (in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
to extend.
Notice: should be bigger volume size
:param min_sleep: minimum sleep time between volume extension and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume extension and
deletion (in seconds)
:param kwargs: optional args to extend the volume
"""
volume = self._create_volume(size, **kwargs)
self._extend_volume(volume, new_size)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_from_volume_and_delete_volume(self, size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create volume from volume and then delete it.
Scenario for testing volume clone.Optional 'min_sleep' and 'max_sleep'
parameters allow the scenario to simulate a pause between volume
creation and deletion (of random duration from [min_sleep, max_sleep]).
:param size: volume size (in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
Should be equal or bigger source volume size
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
source_vol = random.choice(self.context["tenant"]["volumes"])
volume = self._create_volume(size, source_volid=source_vol["id"],
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_delete_snapshot(self, force=False, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and then delete a volume-snapshot.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between snapshot creation and deletion
(of random duration from [min_sleep, max_sleep]).
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param min_sleep: minimum sleep time between snapshot creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between snapshot creation and
deletion (in seconds)
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
snapshot = self._create_snapshot(volume["id"], force=force, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_snapshot(snapshot)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def create_and_attach_volume(self, size, image, flavor, **kwargs):
"""Create a VM and attach a volume to it.
Simple test to create a VM and attach a volume, then
detach the volume and delete volume/VM.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param kwargs: optional arguments for VM creation
"""
server = self._boot_server(image, flavor, **kwargs)
volume = self._create_volume(size)
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_volume(volume)
self._delete_server(server)
@validation.volume_type_exists("volume_type")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def create_snapshot_and_attach_volume(self, volume_type=False,
size=None, **kwargs):
"""Create volume, snapshot and attach/detach volume.
This scenario is based off of the standalone qaStressTest.py
(https://github.com/WaltHP/cinder-stress).
:param volume_type: Whether or not to specify volume type when creating
volumes.
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if size is None:
size = {"min": 1, "max": 5}
selected_type = None
volume_types = [None]
if volume_type:
volume_types_list = self.clients("cinder").volume_types.list()
for s in volume_types_list:
volume_types.append(s.name)
selected_type = random.choice(volume_types)
volume = self._create_volume(size, volume_type=selected_type)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
@logging.log_deprecated_args("Use 'nested_level' as an int", "0.1.2",
["nested_level"], once=True)
def create_nested_snapshots_and_attach_volume(self,
size=None,
nested_level=None,
**kwargs):
"""Create a volume from snapshot and attach/detach the volume
This scenario create volume, create it's snapshot, attach volume,
then create new volume from existing snapshot and so on,
with defined nested level, after all detach and delete them.
volume->snapshot->volume->snapshot->volume ...
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param nested_level: Nested level - dictionary or int, dictionary
contains two values:
min - minimum number of volumes will be created
from snapshot;
max - maximum number of volumes will be created
from snapshot.
due to its deprecated would be taken min value.
int, means the exact nested level.
default value: 1.
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if size is None:
size = {"min": 1, "max": 5}
if nested_level is None:
nested_level = 1
nested_level = nested_level or 1
if isinstance(nested_level, dict):
nested_level = nested_level.get("min", 1)
# NOTE: Volume size cannot be smaller than the snapshot size, so
# volume with specified size should be created to avoid
# size mismatching between volume and snapshot due random
# size in _create_volume method.
size = random.randint(size["min"], size["max"])
source_vol = self._create_volume(size)
nes_objs = [(self.get_random_server(), source_vol,
self._create_snapshot(source_vol.id, False, **kwargs))]
self._attach_volume(nes_objs[0][0], nes_objs[0][1])
snapshot = nes_objs[0][2]
for i in range(nested_level - 1):
volume = self._create_volume(size, snapshot_id=snapshot.id)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
nes_objs.append((server, volume, snapshot))
nes_objs.reverse()
for server, volume, snapshot in nes_objs:
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_snapshots(self, force=False, detailed=True, **kwargs):
"""Create and then list a volume-snapshot.
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param detailed: True if detailed information about snapshots
should be listed
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
self._create_snapshot(volume["id"], force=force, **kwargs)
self._list_snapshots(detailed)
@validation.required_services(consts.Service.CINDER, consts.Service.GLANCE)
@validation.required_openstack(users=True)
@validation.required_parameters("size")
@scenario.configure(context={"cleanup": ["cinder", "glance"]})
def create_and_upload_volume_to_image(self, size, force=False,
container_format="bare",
disk_format="raw",
do_delete=True,
**kwargs):
"""Create and upload a volume to image.
:param size: volume size (integers, in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param force: when set to True volume that is attached to an instance
could be uploaded to image
:param container_format: image container format
:param disk_format: disk format for image
:param do_delete: deletes image and volume after uploading if True
:param kwargs: optional args to create a volume
"""
volume = self._create_volume(size, **kwargs)
image = self._upload_volume_to_image(volume, force, container_format,
disk_format)
if do_delete:
self._delete_volume(volume)
self._delete_image(image)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_volume_backup(self, size, do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create a volume backup.
:param size: volume size in GB
:param do_delete: if True, a volume and a volume backup will
be deleted after creation.
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_restore_volume_backup(self, size, do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Restore volume backup.
:param size: volume size in GB
:param do_delete: if True, the volume and the volume backup will
be deleted after creation.
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
self._restore_backup(backup.id)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_volume_backups(self, size, detailed=True,
do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create and then list a volume backup.
:param size: volume size in GB
:param detailed: True if detailed information about backup
should be listed
:param do_delete: if True, a volume backup will be deleted
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
self._list_backups(detailed)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
|
paboldin/rally
|
rally/plugins/openstack/scenarios/cinder/volumes.py
|
Python
|
apache-2.0
| 24,768
|
# TODO: Consider rewriting as management command
import logging
import sys
import django
from django.db import transaction
# django.setup() must run before the Django-dependent imports below.
django.setup()
from osf.models import Registration
from scripts import utils as script_utils
from website import settings
from website.app import init_app
from api.share.utils import update_share
# Module-level logger; a file handler is attached in main() for real runs.
logger = logging.getLogger(__name__)
def migrate(dry_run):
    """Send every public, non-deleted registration to SHARE.

    :param dry_run: when True, only log what would be sent.
    """
    assert settings.SHARE_URL, 'SHARE_URL must be set to migrate.'
    assert settings.SHARE_API_TOKEN, 'SHARE_API_TOKEN must be set to migrate.'

    queryset = Registration.objects.filter(is_deleted=False, is_public=True)
    total = queryset.count()
    logger.info('Preparing to migrate {} registrations.'.format(total))

    # iterator() streams rows instead of caching the whole queryset.
    for index, registration in enumerate(queryset.iterator(), start=1):
        logger.info('{}/{} - {}'.format(index, total, registration._id))
        if not dry_run:
            update_share(registration)
            logger.info('Registration {} was sent to SHARE.'.format(registration._id))
def main():
    """Entry point: parse the --dry flag, initialize the app, run migrate()."""
    dry_run = '--dry' in sys.argv
    if not dry_run:
        # Only attach a file logger for real (non-dry) runs.
        script_utils.add_file_logger(logger, __file__)
    init_app(set_backends=True, routes=False)
    # Run the whole migration in one transaction so a failure rolls back.
    with transaction.atomic():
        migrate(dry_run)


if __name__ == '__main__':
    main()
|
mfraezz/osf.io
|
scripts/migration/migrate_share_registration_data.py
|
Python
|
apache-2.0
| 1,325
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import futurist
from futurist import waiters
from oslo_utils import uuidutils
from taskflow.engines.action_engine import executor as base_executor
from taskflow.engines.worker_based import endpoint
from taskflow.engines.worker_based import executor as worker_executor
from taskflow.engines.worker_based import server as worker_server
from taskflow import test
from taskflow.tests import utils as test_utils
from taskflow.types import failure
from taskflow.utils import threading_utils
# Broker endpoints shared by the test server and executor (memory transport).
TEST_EXCHANGE, TEST_TOPIC = ('test-exchange', 'test-topic')
WAIT_TIMEOUT = 1.0  # seconds to wait for workers to show up
POLLING_INTERVAL = 0.01  # kombu memory-transport polling interval (seconds)
class TestPipeline(test.TestCase):
    """End-to-end tests of the worker-based executor/server pipeline.

    Each test starts an in-process worker server (kombu memory transport)
    plus a worker task executor, pushes a task through the pair, and checks
    the (event, result) tuple that comes back.
    """

    def _fetch_server(self, task_classes):
        # Wrap each task class in an endpoint the server can dispatch to.
        endpoints = []
        for cls in task_classes:
            endpoints.append(endpoint.Endpoint(cls))
        server = worker_server.Server(
            TEST_TOPIC, TEST_EXCHANGE,
            futurist.ThreadPoolExecutor(max_workers=1), endpoints,
            transport='memory',
            transport_options={
                'polling_interval': POLLING_INTERVAL,
            })
        # The server runs in a daemon thread; the caller starts it.
        server_thread = threading_utils.daemon_thread(server.start)
        return (server, server_thread)

    def _fetch_executor(self):
        # Executor side of the pipeline, bound to the same exchange/topic.
        executor = worker_executor.WorkerTaskExecutor(
            uuidutils.generate_uuid(),
            TEST_EXCHANGE,
            [TEST_TOPIC],
            transport='memory',
            transport_options={
                'polling_interval': POLLING_INTERVAL,
            })
        return executor

    def _start_components(self, task_classes):
        # Start both halves and register cleanups in reverse dependency
        # order (server stops before its thread is joined).
        server, server_thread = self._fetch_server(task_classes)
        executor = self._fetch_executor()
        self.addCleanup(executor.stop)
        self.addCleanup(server_thread.join)
        self.addCleanup(server.stop)
        executor.start()
        server_thread.start()
        server.wait()
        return (executor, server)

    def test_execution_pipeline(self):
        # Happy path: a task that returns 1 should come back EXECUTED.
        executor, server = self._start_components([test_utils.TaskOneReturn])
        self.assertEqual(0, executor.wait_for_workers(timeout=WAIT_TIMEOUT))
        t = test_utils.TaskOneReturn()
        progress_callback = lambda *args, **kwargs: None
        f = executor.execute_task(t, uuidutils.generate_uuid(), {},
                                  progress_callback=progress_callback)
        waiters.wait_for_any([f])
        event, result = f.result()
        self.assertEqual(1, result)
        self.assertEqual(base_executor.EXECUTED, event)

    def test_execution_failure_pipeline(self):
        # Failure path: the task raises; the result should be a Failure
        # wrapping the original RuntimeError.
        task_classes = [
            test_utils.TaskWithFailure,
        ]
        executor, server = self._start_components(task_classes)
        t = test_utils.TaskWithFailure()
        progress_callback = lambda *args, **kwargs: None
        f = executor.execute_task(t, uuidutils.generate_uuid(), {},
                                  progress_callback=progress_callback)
        waiters.wait_for_any([f])
        action, result = f.result()
        self.assertIsInstance(result, failure.Failure)
        self.assertEqual(RuntimeError, result.check(RuntimeError))
        self.assertEqual(base_executor.EXECUTED, action)
|
junneyang/taskflow
|
taskflow/tests/unit/worker_based/test_pipeline.py
|
Python
|
apache-2.0
| 3,812
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [START documentai_process_splitter_document]
# TODO(developer): Uncomment these variables before running the sample.
# project_id= 'YOUR_PROJECT_ID'
# location = 'YOUR_PROJECT_LOCATION' # Format is 'us' or 'eu'
# processor_id = 'YOUR_PROCESSOR_ID' # Create processor in Cloud Console
# file_path = '/path/to/local/pdf'
def process_document_splitter_sample(
    project_id: str, location: str, processor_id: str, file_path: str
):
    """Run a Document AI splitter processor over a local PDF and print the
    subdocuments (page ranges) it detects."""
    from google.cloud import documentai_v1beta3 as documentai

    # You must set the api_endpoint if you use a location other than 'us'.
    opts = {"api_endpoint": "eu-documentai.googleapis.com"} if location == "eu" else {}
    client = documentai.DocumentProcessorServiceClient(client_options=opts)

    # Processors must already exist (create them in the Cloud Console); the
    # resource name is projects/<project>/locations/<location>/processors/<id>.
    name = f"projects/{project_id}/locations/{location}/processors/{processor_id}"

    # Read the file into memory and wrap it for the API.
    with open(file_path, "rb") as pdf:
        document = {"content": pdf.read(), "mime_type": "application/pdf"}

    # Recognizes text entities in the PDF document
    result = client.process_document(request={"name": name, "raw_document": document})
    print("Document processing complete.\n")

    # The splitter processor only reports document text plus logical split
    # boundaries; use the OCR/form/specialized processors for extraction.
    # https://cloud.google.com/document-ai/docs/processors-list#processor_doc-splitter
    document = result.document
    print(f"Found {len(document.entities)} subdocuments:")
    for entity in document.entities:
        conf_percent = "{:.1%}".format(entity.confidence)
        pages_range = page_refs_to_string(entity.page_anchor.page_refs)

        # Print subdocument type information, if available
        try:
            doctype = entity.type
            print(
                f'{conf_percent} confident that {pages_range} a "{doctype}" subdocument.'
            )
        except AttributeError:
            print(f"{conf_percent} confident that {pages_range} a subdocument.")
def page_refs_to_string(page_refs) -> str:
    """Convert page refs into a string describing the page or page range.

    :param page_refs: sequence of page refs (objects with a zero-based
        ``page`` attribute, possibly a string); one ref means a single page,
        otherwise the first and last refs bound the range.  (The original
        annotation claimed ``dict``, but the value is indexed positionally.)
    :return: e.g. ``"page 3 is"`` or ``"pages 2 to 5 are"``.
    """
    if len(page_refs) == 1:
        num = str(int(page_refs[0].page) + 1)
        return f"page {num} is"
    # Use the *last* ref rather than blindly indexing [1], so anchors that
    # carry more than two refs still describe the full range.
    start = str(int(page_refs[0].page) + 1)
    end = str(int(page_refs[-1].page) + 1)
    return f"pages {start} to {end} are"


# [END documentai_process_splitter_document]
|
googleapis/python-documentai
|
samples/snippets/process_document_splitter_sample.py
|
Python
|
apache-2.0
| 3,497
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-18 00:44
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import inventory.common.model_mixins
import inventory.common.storage
class Migration(migrations.Migration):
    # Auto-generated initial migration for the ``accounts`` app: creates a
    # custom ``User`` model plus ``Question``/``Answer`` models for
    # security-question authentication.
    # NOTE(review): generated code — do not hand-edit field options; make a
    # new migration instead.

    initial = True

    dependencies = [
        ('regions', '0001_initial'),
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        # Custom user model replacing django.contrib.auth's default user.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('public_id', models.CharField(blank=True, help_text='Public ID to identify a individual user.', max_length=30, unique=True, verbose_name='Public User ID')),
                ('_role', models.SmallIntegerField(choices=[(0, 'Default User'), (1, 'Administrator')], default=0, help_text='The role of the user.', verbose_name='Role')),
                ('picture', models.ImageField(blank=True, help_text='Photo of the individual.', null=True, storage=inventory.common.storage.InventoryFileStorage(), upload_to=inventory.common.storage.create_file_path, verbose_name='Picture')),
                ('send_email', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=False, help_text='Set to YES if this individual needs to be sent an email.', verbose_name='Send Email')),
                ('need_password', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=False, help_text='Set to YES if this individual needs to reset their password.', verbose_name='Need Password')),
                ('dob', models.DateField(blank=True, help_text='The date of your birth.', null=True, verbose_name='Date of Birth')),
                ('address_01', models.CharField(blank=True, help_text='Address line one.', max_length=50, null=True, verbose_name='Address 1')),
                ('address_02', models.CharField(blank=True, help_text='Address line two.', max_length=50, null=True, verbose_name='Address 2')),
                ('city', models.CharField(blank=True, help_text='The city this individual lives in.', max_length=30, null=True, verbose_name='City')),
                ('postal_code', models.CharField(blank=True, help_text='The zip code of residence.', max_length=15, null=True, verbose_name='Postal Code')),
                ('project_default', models.CharField(blank=True, help_text='The default project public_id.', max_length=30, null=True, verbose_name='Project Default')),
                ('country', models.ForeignKey(blank=True, help_text='The country of residence.', null=True, on_delete=django.db.models.deletion.CASCADE, to='regions.Country', verbose_name='Country')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('language', models.ForeignKey(blank=True, help_text='The language code.', null=True, on_delete=django.db.models.deletion.CASCADE, to='regions.Language', verbose_name='Language')),
                ('subdivision', models.ForeignKey(blank=True, help_text='The state of residence.', null=True, on_delete=django.db.models.deletion.CASCADE, to='regions.Subdivision', verbose_name='State/Province')),
                ('timezone', models.ForeignKey(blank=True, help_text='The timezone.', null=True, on_delete=django.db.models.deletion.CASCADE, to='regions.TimeZone', verbose_name='Timezone')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name_plural': 'Users',
                'ordering': ('last_name', 'username'),
                'verbose_name': 'User',
            },
            bases=(inventory.common.model_mixins.ValidateOnSaveMixin, models.Model),
        ),
        # Answer to a security question; FKs to Question/User added below.
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(help_text='The date and time of creation.', verbose_name='Date Created')),
                ('updated', models.DateTimeField(help_text='The date and time last updated.', verbose_name='Last Updated')),
                ('public_id', models.CharField(blank=True, help_text='Public ID to identify an individual secure answer.', max_length=30, unique=True, verbose_name='Public Answer ID')),
                ('answer', models.CharField(help_text='An answer to an authentication question.', max_length=250, verbose_name='Answer')),
                ('creator', models.ForeignKey(editable=False, help_text='The user who created this record.', on_delete=django.db.models.deletion.CASCADE, related_name='accounts_answer_creator_related', to=settings.AUTH_USER_MODEL, verbose_name='Creator')),
            ],
            options={
                'verbose_name_plural': 'Answers',
                'ordering': ('question__question',),
                'verbose_name': 'Answer',
            },
            bases=(inventory.common.model_mixins.ValidateOnSaveMixin, models.Model),
        ),
        # Security question that answers reference.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(help_text='The date and time of creation.', verbose_name='Date Created')),
                ('updated', models.DateTimeField(help_text='The date and time last updated.', verbose_name='Last Updated')),
                ('active', models.BooleanField(default=True, help_text='If checked the record is active.', verbose_name='Active')),
                ('public_id', models.CharField(blank=True, help_text='Public ID to identify an individual security question.', max_length=30, unique=True, verbose_name='Public Question ID')),
                ('question', models.CharField(help_text='A question for authentication.', max_length=100, verbose_name='Question')),
                ('creator', models.ForeignKey(editable=False, help_text='The user who created this record.', on_delete=django.db.models.deletion.CASCADE, related_name='accounts_question_creator_related', to=settings.AUTH_USER_MODEL, verbose_name='Creator')),
                ('updater', models.ForeignKey(editable=False, help_text='The last user to update this record.', on_delete=django.db.models.deletion.CASCADE, related_name='accounts_question_updater_related', to=settings.AUTH_USER_MODEL, verbose_name='Updater')),
            ],
            options={
                'verbose_name_plural': 'Questions',
                'ordering': ('question',),
                'verbose_name': 'Question',
            },
            bases=(inventory.common.model_mixins.ValidateOnSaveMixin, models.Model),
        ),
        # Remaining Answer FKs are added after Question exists.
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(help_text='The question relative to this answer.', on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='accounts.Question', verbose_name='Question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='updater',
            field=models.ForeignKey(editable=False, help_text='The last user to update this record.', on_delete=django.db.models.deletion.CASCADE, related_name='accounts_answer_updater_related', to=settings.AUTH_USER_MODEL, verbose_name='Updater'),
        ),
        migrations.AddField(
            model_name='answer',
            name='user',
            field=models.ForeignKey(help_text='User to which this answer applies.', on_delete=django.db.models.deletion.CASCADE, related_name='answers', to=settings.AUTH_USER_MODEL, verbose_name='User'),
        ),
    ]
|
cnobile2012/inventory
|
inventory/accounts/migrations/0001_initial.py
|
Python
|
mit
| 9,769
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import testtools
# Environment-variable values interpreted as "enabled".
_TRUE_VALUES = ('true', '1', 'yes')


# FIXME(dhellmann) Update this to use oslo.test library
class TestCase(testtools.TestCase):
    """Test case base class for all unit tests."""

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Honour OS_TEST_TIMEOUT when it parses as a positive integer.
        raw_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            timeout = int(raw_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            timeout = 0
        if timeout > 0:
            self.useFixture(fixtures.Timeout(timeout, gentle=True))

        # Isolate temp files and HOME so tests cannot pollute the host.
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Optionally capture stdout/stderr when the env vars request it.
        if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
            fake_out = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_out))
        if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
            fake_err = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', fake_err))

        self.log_fixture = self.useFixture(fixtures.FakeLogger())
|
JioCloud/oslo.db
|
tests/base.py
|
Python
|
apache-2.0
| 1,951
|
# This file contains custom hooks for wpull with the following functionality:
# - a domain whitelist
# - a regex-based URL blacklist
# - completion notification
import os
import re
# Paths of the optional whitelist/blacklist files; overridable via env vars.
WHITELIST_LOCATION = os.environ.get('WHITELIST', '/data/whitelist.txt')
BLACKLIST_LOCATION = os.environ.get('BLACKLIST', '/data/blacklist.txt')
def load_whitelist(path=None):
    """
    Load whitelist of allowed domains.

    :param path: file to read, one hostname per line; defaults to
        WHITELIST_LOCATION (backward compatible with the old no-arg call).
    :return: set of hostnames; empty set when the file does not exist.
    """
    path = path or WHITELIST_LOCATION
    # BUG FIX: ``{}`` is an empty *dict*, not a set; use set() so the
    # returned type is consistent whether or not the file exists.
    whitelist = set()
    if os.path.isfile(path):
        with open(path, 'r') as f:
            whitelist = {line.rstrip() for line in f}
    print('Registered whitelist with %d entries' % len(whitelist))
    return whitelist
def load_blacklist(path=None):
    """
    Load regular expressions to exclude URLs.

    :param path: file to read, one regex per line; defaults to
        BLACKLIST_LOCATION (backward compatible with the old no-arg call).
    :return: set of compiled patterns; empty set when the file is missing.
    """
    path = path or BLACKLIST_LOCATION
    # BUG FIX: ``{}`` is an empty *dict*, not a set; use set() so the
    # returned type is consistent whether or not the file exists.
    blacklist = set()
    if os.path.isfile(path):
        with open(path, 'r') as f:
            blacklist = {re.compile(line.rstrip()) for line in f}
    print('Registered blacklist with %d entries' % len(blacklist))
    return blacklist
def validate_urls():
    """
    Apply rules for URL inclusion/exclusion by installing wpull's
    accept_url callback.
    """
    allowed_hosts = load_whitelist()
    excluded_patterns = load_blacklist()

    def accept_url(url_info, record_info, verdict, reasons):
        # A non-empty whitelist restricts crawling to its domains only.
        if allowed_hosts and url_info['hostname'] not in allowed_hosts:
            return False
        # Any blacklist pattern match excludes the URL outright.
        if any(rule.search(url_info['url']) for rule in excluded_patterns):
            return False
        # Otherwise, defer to wpull's decision.
        return verdict

    wpull_hook.callbacks.accept_url = accept_url
def completion_hook():
    """
    Trigger an optional hook when the crawl completes.

    The hook fires only if the script's original working directory contains
    a module named ``complete`` (file ``complete.py``) defining::

        on_complete(start_time, end_time, num_urls, bytes_downloaded)

    which is wired up as wpull's finishing-statistics callback.
    """
    if not os.path.isfile('../complete.py'):
        return
    import sys
    sys.path.append('..')
    import complete
    wpull_hook.callbacks.finishing_statistics = complete.on_complete
    print('Registered completion hook')
# Install both hooks when wpull loads this script.
validate_urls()
completion_hook()
|
nmalkin/crawl
|
hook.py
|
Python
|
bsd-2-clause
| 2,440
|
import cmsisdsp as dsp
import numpy as np
from scipy import signal
from pylab import figure, clf, plot, xlabel, ylabel, xlim, ylim, title, grid, axes, show,semilogx, semilogy
# Data file from https://archive.physionet.org/pn3/ecgiddb/Person_87/rec_2.dat
def q31sat(x):
    """Saturate ``x`` to the signed 32-bit (Q31) integer range.

    :param x: integer-valued sample (Python int / numpy scalar)
    :return: ``np.int32`` clipped to [-2**31, 2**31 - 1]
    """
    if x > 0x7FFFFFFF:
        return(np.int32(0x7FFFFFFF))
    elif x < -0x80000000:
        # BUG FIX: the lower clamp must be the *negative* INT32_MIN.
        # ``np.int32(0x80000000)`` does not fit in int32 (it raises an
        # OverflowError on modern numpy / wrapped on legacy numpy).
        return(np.int32(-0x80000000))
    else:
        return(np.int32(x))


# Element-wise version of q31sat for numpy arrays.
q31satV = np.vectorize(q31sat)


def toQ31(x):
    """Convert float samples in [-1, 1) to saturated Q31 integers."""
    return(q31satV(np.round(x * (1 << 31))))


def Q31toF32(x):
    """Convert Q31 integers back to floats in [-1, 1)."""
    return(1.0 * x / 2**31)
# --- Load the ECG recording --------------------------------------------------
filename = 'rec_2.dat'
# BUG FIX: open in *binary* mode for np.fromfile; the original used text mode
# and then evaluated ``f.closed`` (an attribute) instead of calling
# ``f.close()``, leaking the handle.  The context manager fixes both.
with open(filename, "rb") as f:
    sig = np.fromfile(f, dtype=np.int16)
# Scale the raw 12-bit samples down to floats.
sig = 1.0*sig / (1 << 12)

# --- Design a 3-section IIR filter from poles/zeros --------------------------
p0 = np.exp(1j*0.05) * 0.98
p1 = np.exp(1j*0.25) * 0.9
p2 = np.exp(1j*0.45) * 0.97
z0 = np.exp(1j*0.02)
z1 = np.exp(1j*0.65)
z2 = np.exp(1j*1.0)
g = 0.02
nb = 300  # number of samples shown in the plots
sos = signal.zpk2sos(
    [z0,np.conj(z0),z1,np.conj(z1),z2,np.conj(z2)]
    ,[p0, np.conj(p0),p1, np.conj(p1),p2, np.conj(p2)]
    ,g)

# Reference (floating-point) filtering with scipy.
res=signal.sosfilt(sos,sig)
figure()
plot(sig[1:nb])
figure()
plot(res[1:nb])

# --- Same filter with the CMSIS-DSP fixed-point (Q31) biquad -----------------
biquadQ31 = dsp.arm_biquad_casd_df1_inst_q31()
numStages=3
state=np.zeros(numStages*4)
# For use in CMSIS, denominator coefs must be negated
# and first a0 coef which is always 1 must be removed
coefs=np.reshape(np.hstack((sos[:,:3],-sos[:,4:])),15)
# Scale coefficients into the Q31 range; compensated by postshift below.
coefs = coefs / 4.0
coefsQ31 = toQ31(coefs)
postshift = 2
dsp.arm_biquad_cascade_df1_init_q31(biquadQ31,numStages,coefsQ31,state,postshift)
sigQ31=toQ31(sig)
nbSamples=sigQ31.shape[0]
# Here we demonstrate how we can process a long sequence of samples per block
# and thus check that the state of the biquad is well updated and preserved
# between the calls.
half = int(round(nbSamples / 2))
res2a=dsp.arm_biquad_cascade_df1_q31(biquadQ31,sigQ31[1:half])
res2b=dsp.arm_biquad_cascade_df1_q31(biquadQ31,sigQ31[half+1:nbSamples])
res2=Q31toF32(np.hstack((res2a,res2b)))
figure()
plot(res2[1:nb])

show()
|
ARM-software/CMSIS_5
|
CMSIS/DSP/PythonWrapper/examples/example.py
|
Python
|
apache-2.0
| 1,943
|
""" This module contain solvers for all kinds of equations:
- algebraic, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- transcendental, use tsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from sympy.core.sympify import sympify
from sympy.core.basic import S, Mul
from sympy.core.add import Add
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Wild
from sympy.core.relational import Equality
from sympy.core.numbers import ilcm
from sympy.functions import sqrt, log, exp, LambertW
from sympy.simplify import simplify, collect
from sympy.matrices import Matrix, zeros
from sympy.polys import roots
from sympy.utilities import any, all
from sympy.utilities.iterables import iff
from sympy.utilities.lambdify import lambdify
from sympy.mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from warnings import warn
# Codes for guess solve strategy
GS_POLY = 0
GS_RATIONAL = 1
GS_POLY_CV_1 = 2 # can be converted to a polynomial equation via the change of variable y -> x**a, a real
GS_POLY_CV_2 = 3 # can be converted to a polynomial equation multiplying on both sides by x**m
# for example, x + 1/x == 0. Multiplying by x yields x**2 + x == 0
GS_RATIONAL_CV_1 = 4 # can be converted to a rational equation via the change of variable y -> x**n
GS_TRANSCENDENTAL = 5
def guess_solve_strategy(expr, symbol):
"""
Tries to guess what approach should be used to solve a specific equation
Returns
=======
- -1: could not guess
- integer > 0: code representing certain type of equation. See GS_* fields
on this module for a complete list
Examples
========
>>> from sympy import Symbol, Rational
>>> x = Symbol('x')
>>> guess_solve_strategy(x**2 + 1, x)
0
>>> guess_solve_strategy(x**Rational(1,2) + 1, x)
2
"""
eq_type = -1
if expr.is_Add:
return max([guess_solve_strategy(i, symbol) for i in expr.args])
elif expr.is_Mul:
# check for rational functions
num, denom = expr.as_numer_denom()
if denom != 1 and denom.has(symbol):
#we have a quotient
m = max(guess_solve_strategy(num, symbol), guess_solve_strategy(denom, symbol))
if m == GS_POLY:
return GS_RATIONAL
elif m == GS_POLY_CV_1:
return GS_RATIONAL_CV_1
else:
raise NotImplementedError
else:
return max([guess_solve_strategy(i, symbol) for i in expr.args])
elif expr.is_Symbol:
return GS_POLY
elif expr.is_Pow:
if expr.exp.has(symbol):
return GS_TRANSCENDENTAL
elif not expr.exp.has(symbol) and expr.base.has(symbol):
if expr.exp.is_Integer and expr.exp > 0:
eq_type = max(eq_type, GS_POLY)
elif expr.exp.is_Integer and expr.exp < 0:
eq_type = max(eq_type, GS_POLY_CV_2)
elif expr.exp.is_Rational:
eq_type = max(eq_type, GS_POLY_CV_1)
else:
return GS_TRANSCENDENTAL
elif expr.is_Function and expr.has(symbol):
return GS_TRANSCENDENTAL
elif not expr.has(symbol):
return GS_POLY
return eq_type
def solve(f, *symbols, **flags):
"""Solves equations and systems of equations.
Currently supported are univariate polynomial and transcendental
equations and systems of linear and polynomial equations. Input
is formed as a single expression or an equation, or an iterable
container in case of an equation system. The type of output may
vary and depends heavily on the input. For more details refer to
more problem specific functions.
By default all solutions are simplified to make the output more
readable. If this is not the expected behavior (e.g., because of
speed issues) set simplified=False in function arguments.
To solve equations and systems of equations like recurrence relations
or differential equations, use rsolve() or dsolve(), respectively.
>>> from sympy import *
>>> x,y = symbols('xy')
Solve a polynomial equation:
>>> solve(x**4-1, x)
[1, -1, -I, I]
Solve a linear system:
>>> solve((x+5*y-2, -3*x+6*y-15), x, y)
{x: -3, y: 1}
"""
def sympit(w):
return map(sympify, iff(isinstance(w,(list, tuple, set)), w, [w]))
# make f and symbols into lists of sympified quantities
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
bare_f = not isinstance(f, (list, tuple, set))
f, symbols = (sympit(w) for w in [f, symbols])
for i, fi in enumerate(f):
if isinstance(fi, Equality):
f[i] = fi.lhs - fi.rhs
if not symbols:
#get symbols from equations or supply dummy symbols since
#solve(3,x) returns []...though it seems that it should raise some sort of error TODO
symbols = set([])
for fi in f:
symbols |= fi.atoms(Symbol) or set([Symbol('x',dummy=True)])
symbols = list(symbols)
if bare_f:
f=f[0]
if len(symbols) == 1:
if isinstance(symbols[0], (list, tuple, set)):
symbols = symbols[0]
result = list()
# Begin code handling for Function and Derivative instances
# Basic idea: store all the passed symbols in symbols_passed, check to see
# if any of them are Function or Derivative types, if so, use a dummy
# symbol in their place, and set symbol_swapped = True so that other parts
# of the code can be aware of the swap. Once all swapping is done, the
# continue on with regular solving as usual, and swap back at the end of
# the routine, so that whatever was passed in symbols is what is returned.
symbols_new = []
symbol_swapped = False
symbols_passed = list(symbols)
for i, s in enumerate(symbols):
if s.is_Symbol:
s_new = s
elif s.is_Function:
symbol_swapped = True
s_new = Symbol('F%d' % i, dummy=True)
elif s.is_Derivative:
symbol_swapped = True
s_new = Symbol('D%d' % i, dummy=True)
else:
raise TypeError('not a Symbol or a Function')
symbols_new.append(s_new)
if symbol_swapped:
swap_back_dict = dict(zip(symbols_new, symbols))
# End code for handling of Function and Derivative instances
if not isinstance(f, (tuple, list, set)):
# Create a swap dictionary for storing the passed symbols to be solved
# for, so that they may be swapped back.
if symbol_swapped:
swap_dict = zip(symbols, symbols_new)
f = f.subs(swap_dict)
symbols = symbols_new
if len(symbols) != 1:
result = {}
for s in symbols:
result[s] = solve(f, s, **flags)
if flags.get('simplified', True):
for s, r in result.items():
result[s] = map(simplify, r)
return result
symbol = symbols[0]
strategy = guess_solve_strategy(f, symbol)
if strategy == GS_POLY:
poly = f.as_poly( symbol )
if poly is None:
raise NotImplementedError("Cannot solve equation " + str(f) + " for "
+ str(symbol))
result = roots(poly, cubics=True, quartics=True).keys()
elif strategy == GS_RATIONAL:
P, Q = f.as_numer_denom()
#TODO: check for Q != 0
result = solve(P, symbol, **flags)
elif strategy == GS_POLY_CV_1:
args = list(f.args)
if isinstance(f, Add):
# we must search for a suitable change of variable
# collect exponents
exponents_denom = list()
for arg in args:
if isinstance(arg, Pow):
exponents_denom.append(arg.exp.q)
elif isinstance(arg, Mul):
for mul_arg in arg.args:
if isinstance(mul_arg, Pow):
exponents_denom.append(mul_arg.exp.q)
assert len(exponents_denom) > 0
if len(exponents_denom) == 1:
m = exponents_denom[0]
else:
# get the GCD of the denominators
m = reduce(ilcm, exponents_denom)
# x -> y**m.
# we assume positive for simplification purposes
t = Symbol('t', positive=True, dummy=True)
f_ = f.subs(symbol, t**m)
if guess_solve_strategy(f_, t) != GS_POLY:
raise NotImplementedError("Could not convert to a polynomial equation: %s" % f_)
cv_sols = solve(f_, t)
for sol in cv_sols:
result.append(sol**m)
elif isinstance(f, Mul):
for mul_arg in args:
result.extend(solve(mul_arg, symbol))
elif strategy == GS_POLY_CV_2:
m = 0
args = list(f.args)
if isinstance(f, Add):
for arg in args:
if isinstance(arg, Pow):
m = min(m, arg.exp)
elif isinstance(arg, Mul):
for mul_arg in arg.args:
if isinstance(mul_arg, Pow):
m = min(m, mul_arg.exp)
elif isinstance(f, Mul):
for mul_arg in args:
if isinstance(mul_arg, Pow):
m = min(m, mul_arg.exp)
f1 = simplify(f*symbol**(-m))
result = solve(f1, symbol)
# TODO: we might have introduced unwanted solutions
# when multiplied by x**-m
elif strategy == GS_TRANSCENDENTAL:
#a, b = f.as_numer_denom()
# Let's throw away the denominator for now. When we have robust
# assumptions, it should be checked, that for the solution,
# b!=0.
result = tsolve(f, *symbols)
elif strategy == -1:
raise ValueError('Could not parse expression %s' % f)
else:
raise NotImplementedError("No algorithms are implemented to solve equation %s" % f)
# This symbol swap should not be necessary for the single symbol case: if you've
# solved for the symbol the it will not appear in the solution. Right now, however
# ode's are getting solutions for solve (even though they shouldn't be -- see the
# swap_back test in test_solvers).
if symbol_swapped:
result = [ri.subs(swap_back_dict) for ri in result]
if flags.get('simplified', True) and strategy != GS_RATIONAL:
return map(simplify, result)
else:
return result
else:
if not f:
return {}
else:
# Create a swap dictionary for storing the passed symbols to be
# solved for, so that they may be swapped back.
if symbol_swapped:
swap_dict = zip(symbols, symbols_new)
f = [fi.subs(swap_dict) for fi in f]
symbols = symbols_new
polys = []
for g in f:
poly = g.as_poly(*symbols)
if poly is not None:
polys.append(poly)
else:
raise NotImplementedError()
if all(p.is_linear for p in polys):
n, m = len(f), len(symbols)
matrix = zeros((n, m + 1))
for i, poly in enumerate(polys):
for coeff, monom in poly.iter_terms():
try:
j = list(monom).index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
soln = solve_linear_system(matrix, *symbols, **flags)
else:
soln = solve_poly_system(polys)
# Use swap_dict to ensure we return the same type as what was
# passed
if symbol_swapped:
if isinstance(soln, dict):
res = {}
for k in soln.keys():
res.update({swap_back_dict[k]: soln[k]})
return res
else:
return soln
else:
return soln
def solve_linear_system(system, *symbols, **flags):
"""Solve system of N linear equations with M variables, which means
both Cramer and over defined systems are supported. The possible
number of solutions is zero, one or infinite. Respectively this
procedure will return None or dictionary with solutions. In the
case of over defined system all arbitrary parameters are skipped.
This may cause situation in with empty dictionary is returned.
In this case it means all symbols can be assigned arbitrary values.
Input to this functions is a Nx(M+1) matrix, which means it has
to be in augmented form. If you are unhappy with such setting
use 'solve' method instead, where you can input equations
explicitly. And don't worry about the matrix, this function
is persistent and will make a local copy of it.
The algorithm used here is fraction free Gaussian elimination,
which results, after elimination, in upper-triangular matrix.
Then solutions are found using back-substitution. This approach
is more efficient and compact than the Gauss-Jordan method.
>>> from sympy import *
>>> x, y = symbols('xy')
Solve the following system:
x + 4 y == 2
-2 x + y == 14
>>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
>>> solve_linear_system(system, x, y)
{x: -6, y: 2}
"""
matrix = system[:,:]
syms = list(symbols)
i, m = 0, matrix.cols-1 # don't count augmentation
while i < matrix.rows:
if i == m:
# an overdetermined system
if any(matrix[i:,m]):
return None # no solutions
else:
# remove trailing rows
matrix = matrix[:i,:]
break
if not matrix[i, i]:
# there is no pivot in current column
# so try to find one in other columns
for k in xrange(i+1, m):
if matrix[i, k]:
break
else:
if matrix[i, m]:
return None # no solutions
else:
# zero row or was a linear combination of
# other rows so now we can safely skip it
matrix.row_del(i)
continue
# we want to change the order of colums so
# the order of variables must also change
syms[i], syms[k] = syms[k], syms[i]
matrix.col_swap(i, k)
pivot_inv = S.One / matrix [i, i]
# divide all elements in the current row by the pivot
matrix.row(i, lambda x, _: x * pivot_inv)
for k in xrange(i+1, matrix.rows):
if matrix[k, i]:
coeff = matrix[k, i]
# subtract from the current row the row containing
# pivot and multiplied by extracted coefficient
matrix.row(k, lambda x, j: simplify(x - matrix[i, j]*coeff))
i += 1
# if there weren't any problems, augmented matrix is now
# in row-echelon form so we can check how many solutions
# there are and extract them using back substitution
simplified = flags.get('simplified', True)
if len(syms) == matrix.rows:
# this system is Cramer equivalent so there is
# exactly one solution to this system of equations
k, solutions = i-1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in xrange(k+1, m):
content -= matrix[k, j]*solutions[syms[j]]
if simplified:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
elif len(syms) > matrix.rows:
# this system will have infinite number of solutions
# dependent on exactly len(syms) - i parameters
k, solutions = i-1, {}
while k >= 0:
content = matrix[k, m]
# run back-substitution for variables
for j in xrange(k+1, i):
content -= matrix[k, j]*solutions[syms[j]]
# run back-substitution for parameters
for j in xrange(i, m):
content -= matrix[k, j]*syms[j]
if simplified:
solutions[syms[k]] = simplify(content)
else:
solutions[syms[k]] = content
k -= 1
return solutions
else:
return None # no solutions
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
"""Solve equation of a type p(x; a_1, ..., a_k) == q(x) where both
p, q are univariate polynomials and f depends on k parameters.
The result of this functions is a dictionary with symbolic
values of those parameters with respect to coefficients in q.
This functions accepts both Equations class instances and ordinary
SymPy expressions. Specification of parameters and variable is
obligatory for efficiency and simplicity reason.
>>> from sympy import *
>>> a, b, c, x = symbols('a', 'b', 'c', 'x')
>>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
{a: 1/2, b: -1/2}
>>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
{a: 1/c, b: -1/c}
"""
if isinstance(equ, Equality):
# got equation, so move all the
# terms to the left hand side
equ = equ.lhs - equ.rhs
system = collect(equ.expand(), sym, evaluate=False).values()
if not any([ equ.has(sym) for equ in system ]):
# consecutive powers in the input expressions have
# been successfully collected, so solve remaining
# system using Gaussian elimination algorithm
return solve(system, *coeffs, **flags)
else:
return None # no solutions
def solve_linear_system_LU(matrix, syms):
""" LU function works for invertible only """
assert matrix.rows == matrix.cols-1
A = matrix[:matrix.rows,:matrix.rows]
b = matrix[:,matrix.cols-1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i,0]
return solutions
x = Symbol('x', dummy=True)
a,b,c,d,e,f,g,h = [Wild(t, exclude=[x]) for t in 'abcdefgh']
patterns = None
def _generate_patterns():
"""
Generates patterns for transcendental equations.
This is lazily calculated (called) in the tsolve() function and stored in
the patterns global variable.
"""
tmp1 = f ** (h-(c*g/b))
tmp2 = (-e*tmp1/a)**(1/d)
global patterns
patterns = [
(a*(b*x+c)**d + e , ((-(e/a))**(1/d)-c)/b),
( b+c*exp(d*x+e) , (log(-b/c)-e)/d),
(a*x+b+c*exp(d*x+e) , -b/a-LambertW(c*d*exp(e-b*d/a)/a)/d),
( b+c*f**(d*x+e) , (log(-b/c)-e*log(f))/d/log(f)),
(a*x+b+c*f**(d*x+e) , -b/a-LambertW(c*d*f**(e-b*d/a)*log(f)/a)/d/log(f)),
( b+c*log(d*x+e) , (exp(-b/c)-e)/d),
(a*x+b+c*log(d*x+e) , -e/d+c/a*LambertW(a/c/d*exp(-b/c+a*e/c/d))),
(a*(b*x+c)**d + e*f**(g*x+h) , -c/b-d*LambertW(-tmp2*g*log(f)/b/d)/g/log(f))
]
def tsolve(eq, sym):
"""
Solves a transcendental equation with respect to the given
symbol. Various equations containing mixed linear terms, powers,
and logarithms, can be solved.
Only a single solution is returned. This solution is generally
not unique. In some cases, a complex solution may be returned
even though a real solution exists.
>>> from sympy import *
>>> x = Symbol('x')
>>> tsolve(3**(2*x+5)-4, x)
[(-5*log(3) + log(4))/(2*log(3))]
>>> tsolve(log(x) + 2*x, x)
[LambertW(2)/2]
"""
if patterns is None:
_generate_patterns()
eq = sympify(eq)
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
sym = sympify(sym)
eq2 = eq.subs(sym, x)
# First see if the equation has a linear factor
# In that case, the other factor can contain x in any way (as long as it
# is finite), and we have a direct solution to which we add others that
# may be found for the remaining portion.
r = Wild('r')
m = eq2.match((a*x+b)*r)
if m and m[a]:
return [(-b/a).subs(m).subs(x, sym)] + solve(m[r], x)
for p, sol in patterns:
m = eq2.match(p)
if m:
return [sol.subs(m).subs(x, sym)]
# let's also try to inverse the equation
lhs = eq
rhs = S.Zero
while True:
indep, dep = lhs.as_independent(sym)
# dep + indep == rhs
if lhs.is_Add:
# this indicates we have done it all
if indep is S.Zero:
break
lhs = dep
rhs-= indep
# dep * indep == rhs
else:
# this indicates we have done it all
if indep is S.One:
break
lhs = dep
rhs/= indep
# -1
# f(x) = g -> x = f (g)
if lhs.is_Function and lhs.nargs==1 and hasattr(lhs, 'inverse'):
rhs = lhs.inverse() (rhs)
lhs = lhs.args[0]
sol = solve(lhs-rhs, sym)
return sol
elif lhs.is_Add:
# just a simple case - we do variable substitution for first function,
# and if it removes all functions - let's call solve.
# x -x -1
# UC: e + e = y -> t + t = y
t = Symbol('t', dummy=True)
terms = lhs.args
# find first term which is Function
for f1 in lhs.args:
if f1.is_Function:
break
else:
raise NotImplementedError("Unable to solve the equation" + \
"(tsolve: at least one Function expected at this point")
# perform the substitution
lhs_ = lhs.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not (lhs_.is_Function or
any(term.is_Function for term in lhs_.args)):
cv_sols = solve(lhs_ - rhs, t)
for sol in cv_sols:
if sol.has(sym):
raise NotImplementedError("Unable to solve the equation")
cv_inv = solve( t - f1, sym )[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
return sols
raise NotImplementedError("Unable to solve the equation.")
def msolve(*args, **kwargs):
"""
Compatibility wrapper pointing to nsolve().
msolve() has been renamed to nsolve(), please use nsolve() directly."""
warn('msolve() is has been renamed, please use nsolve() instead',
DeprecationWarning)
args[0], args[1] = args[1], args[0]
return nsolve(*args, **kwargs)
# TODO: option for calculating J numerically
def nsolve(*args, **kwargs):
"""
Solve a nonlinear equation system numerically.
nsolve(f, [args,] x0, modules=['mpmath'], **kwargs)
f is a vector function of symbolic expressions representing the system.
args are the variables. If there is only one variable, this argument can be
omitted.
x0 is a starting vector close to a solution.
Use the modules keyword to specify which modules should be used to evaluate
the function and the Jacobian matrix. Make sure to use a module that
supports matrices. For more information on the syntax, please see the
docstring of lambdify.
Overdetermined systems are supported.
>>> from sympy import Symbol, nsolve
>>> import sympy
>>> sympy.mpmath.mp.dps = 15
>>> x1 = Symbol('x1')
>>> x2 = Symbol('x2')
>>> f1 = 3 * x1**2 - 2 * x2**2 - 1
>>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
>>> print nsolve((f1, f2), (x1, x2), (-1, 1))
[-1.19287309935246]
[ 1.27844411169911]
For one-dimensional functions the syntax is simplified:
>>> from sympy import sin
>>> nsolve(sin(x), x, 2)
3.14159265358979
>>> nsolve(sin(x), 2)
3.14159265358979
mpmath.findroot is used, you can find there more extensive documentation,
especially concerning keyword parameters and available solvers.
"""
# interpret arguments
if len(args) == 3:
f = args[0]
fargs = args[1]
x0 = args[2]
elif len(args) == 2:
f = args[0]
fargs = None
x0 = args[1]
elif len(args) < 2:
raise TypeError('nsolve expected at least 2 arguments, got %i'
% len(args))
else:
raise TypeError('nsolve expected at most 3 arguments, got %i'
% len(args))
modules = kwargs.get('modules', ['mpmath'])
if isinstance(f, (list, tuple)):
f = Matrix(f).T
if not isinstance(f, Matrix):
# assume it's a sympy expression
if isinstance(f, Equality):
f = f.lhs - f.rhs
f = f.evalf()
atoms = set(s for s in f.atoms() if isinstance(s, Symbol))
if fargs is None:
fargs = atoms.copy().pop()
if not (len(atoms) == 1 and (fargs in atoms or fargs[0] in atoms)):
raise ValueError('expected a one-dimensional and numerical function')
f = lambdify(fargs, f, modules)
return findroot(f, x0, **kwargs)
if len(fargs) > f.cols:
raise NotImplementedError('need at least as many equations as variables')
verbose = kwargs.get('verbose', False)
if verbose:
print 'f(x):'
print f
# derive Jacobian
J = f.jacobian(fargs)
if verbose:
print 'J(x):'
print J
# create functions
f = lambdify(fargs, f.T, modules)
J = lambdify(fargs, J, modules)
# solve the system numerically
x = findroot(f, x0, J=J, **kwargs)
return x
|
jbaayen/sympy
|
sympy/solvers/solvers.py
|
Python
|
bsd-3-clause
| 26,616
|
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
import nose
from nose.tools import assert_raises, raises
from sklearn.utils.testing import assert_greater
from sklearn.linear_model import logistic
from sklearn import datasets
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = datasets.load_iris()
def test_predict_2_classes():
"""Simple sanity check on a 2 classes dataset
Make sure it predicts the correct result on simple datasets.
"""
clf = logistic.LogisticRegression().fit(X, Y1)
assert_array_equal(clf.predict(X), Y1)
assert_array_equal(clf.predict_proba(X).argmax(axis=1), Y1)
clf = logistic.LogisticRegression().fit(X_sp, Y1)
assert_array_equal(clf.predict(X_sp), Y1)
assert_array_equal(clf.predict_proba(X_sp).argmax(axis=1), Y1)
clf = logistic.LogisticRegression(C=100).fit(X, Y1)
assert_array_equal(clf.predict(X), Y1)
assert_array_equal(clf.predict_proba(X).argmax(axis=1), Y1)
clf = logistic.LogisticRegression(C=100).fit(X_sp, Y1)
assert_array_equal(clf.predict(X_sp), Y1)
assert_array_equal(clf.predict_proba(X_sp).argmax(axis=1), Y1)
clf = logistic.LogisticRegression(fit_intercept=False).fit(X, Y1)
assert_array_equal(clf.predict(X), Y1)
assert_array_equal(clf.predict_proba(X).argmax(axis=1), Y1)
clf = logistic.LogisticRegression(fit_intercept=False).fit(X_sp, Y1)
assert_array_equal(clf.predict(X_sp), Y1)
assert_array_equal(clf.predict_proba(X_sp).argmax(axis=1), Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, logistic.LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
clf = logistic.LogisticRegression(C=10).fit(X, Y2)
assert_array_equal(clf.predict(X), Y2)
assert_array_equal(clf.predict_proba(X).argmax(axis=1), Y2)
clf = logistic.LogisticRegression(C=10).fit(X_sp, Y2)
assert_array_equal(clf.predict(X_sp), Y2)
assert_array_equal(clf.predict_proba(X_sp).argmax(axis=1), Y2)
def test_predict_iris():
"""Test logisic regression with the iris dataset"""
clf = logistic.LogisticRegression(C=len(iris.data)).fit(iris.data,
iris.target)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == iris.target), .95)
pred = clf.predict_proba(iris.data).argmax(axis=1)
assert_greater(np.mean(pred == iris.target), .95)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
clf = logistic.LogisticRegression()
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
logistic.LogisticRegression().fit(Xnan, Y1)
if __name__ == '__main__':
nose.runmodule()
|
sgenoud/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
Python
|
bsd-3-clause
| 3,312
|
from PyQt4 import QtGui, QtCore
from shapely.geometry import Point
from shapely import affinity
from math import sqrt
import FlatCAMApp
from GUIElements import *
from FlatCAMObj import FlatCAMGerber, FlatCAMExcellon
class FlatCAMTool(QtGui.QWidget):
toolName = "FlatCAM Generic Tool"
def __init__(self, app, parent=None):
"""
:param app: The application this tool will run in.
:type app: App
:param parent: Qt Parent
:return: FlatCAMTool
"""
QtGui.QWidget.__init__(self, parent)
# self.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.app = app
self.menuAction = None
def install(self):
self.menuAction = self.app.ui.menutool.addAction(self.toolName)
self.menuAction.triggered.connect(self.run)
def run(self):
# Remove anything else in the GUI
self.app.ui.tool_scroll_area.takeWidget()
# Put ourself in the GUI
self.app.ui.tool_scroll_area.setWidget(self)
# Switch notebook to tool page
self.app.ui.notebook.setCurrentWidget(self.app.ui.tool_tab)
self.show()
class DblSidedTool(FlatCAMTool):
toolName = "Double-Sided PCB Tool"
def __init__(self, app):
FlatCAMTool.__init__(self, app)
## Title
title_label = QtGui.QLabel("<font size=4><b>%s</b></font>" % self.toolName)
self.layout.addWidget(title_label)
## Form Layout
form_layout = QtGui.QFormLayout()
self.layout.addLayout(form_layout)
## Layer to mirror
self.object_combo = QtGui.QComboBox()
self.object_combo.setModel(self.app.collection)
form_layout.addRow("Bottom Layer:", self.object_combo)
## Axis
self.mirror_axis = RadioSet([{'label': 'X', 'value': 'X'},
{'label': 'Y', 'value': 'Y'}])
form_layout.addRow("Mirror Axis:", self.mirror_axis)
## Axis Location
self.axis_location = RadioSet([{'label': 'Point', 'value': 'point'},
{'label': 'Box', 'value': 'box'}])
form_layout.addRow("Axis Location:", self.axis_location)
## Point/Box
self.point_box_container = QtGui.QVBoxLayout()
form_layout.addRow("Point/Box:", self.point_box_container)
self.point = EvalEntry()
self.point_box_container.addWidget(self.point)
self.box_combo = QtGui.QComboBox()
self.box_combo.setModel(self.app.collection)
self.point_box_container.addWidget(self.box_combo)
self.box_combo.hide()
## Alignment holes
self.alignment_holes = EvalEntry()
form_layout.addRow("Alignment Holes:", self.alignment_holes)
## Drill diameter for alignment holes
self.drill_dia = LengthEntry()
form_layout.addRow("Drill diam.:", self.drill_dia)
## Buttons
hlay = QtGui.QHBoxLayout()
self.layout.addLayout(hlay)
hlay.addStretch()
self.create_alignment_hole_button = QtGui.QPushButton("Create Alignment Drill")
self.mirror_object_button = QtGui.QPushButton("Mirror Object")
hlay.addWidget(self.create_alignment_hole_button)
hlay.addWidget(self.mirror_object_button)
self.layout.addStretch()
## Signals
self.create_alignment_hole_button.clicked.connect(self.on_create_alignment_holes)
self.mirror_object_button.clicked.connect(self.on_mirror)
self.axis_location.group_toggle_fn = self.on_toggle_pointbox
## Initialize form
self.mirror_axis.set_value('X')
self.axis_location.set_value('point')
def on_create_alignment_holes(self):
axis = self.mirror_axis.get_value()
mode = self.axis_location.get_value()
if mode == "point":
px, py = self.point.get_value()
else:
selection_index = self.box_combo.currentIndex()
bb_obj = self.app.collection.object_list[selection_index] # TODO: Direct access??
xmin, ymin, xmax, ymax = bb_obj.bounds()
px = 0.5*(xmin+xmax)
py = 0.5*(ymin+ymax)
xscale, yscale = {"X": (1.0, -1.0), "Y": (-1.0, 1.0)}[axis]
dia = self.drill_dia.get_value()
tools = {"1": {"C": dia}}
holes = self.alignment_holes.get_value()
drills = []
for hole in holes:
point = Point(hole)
point_mirror = affinity.scale(point, xscale, yscale, origin=(px, py))
drills.append({"point": point, "tool": "1"})
drills.append({"point": point_mirror, "tool": "1"})
def obj_init(obj_inst, app_inst):
obj_inst.tools = tools
obj_inst.drills = drills
obj_inst.create_geometry()
self.app.new_object("excellon", "Alignment Drills", obj_init)
def on_mirror(self):
selection_index = self.object_combo.currentIndex()
fcobj = self.app.collection.object_list[selection_index]
# For now, lets limit to Gerbers and Excellons.
# assert isinstance(gerb, FlatCAMGerber)
if not isinstance(fcobj, FlatCAMGerber) and not isinstance(fcobj, FlatCAMExcellon):
self.info("ERROR: Only Gerber and Excellon objects can be mirrored.")
return
axis = self.mirror_axis.get_value()
mode = self.axis_location.get_value()
if mode == "point":
px, py = self.point.get_value()
else:
selection_index = self.box_combo.currentIndex()
bb_obj = self.app.collection.object_list[selection_index] # TODO: Direct access??
xmin, ymin, xmax, ymax = bb_obj.bounds()
px = 0.5*(xmin+xmax)
py = 0.5*(ymin+ymax)
fcobj.mirror(axis, [px, py])
fcobj.plot()
def on_toggle_pointbox(self):
if self.axis_location.get_value() == "point":
self.point.show()
self.box_combo.hide()
else:
self.point.hide()
self.box_combo.show()
class Measurement(FlatCAMTool):
toolName = "Measurement Tool"
def __init__(self, app):
FlatCAMTool.__init__(self, app)
# self.setContentsMargins(0, 0, 0, 0)
self.layout.setMargin(0)
self.layout.setContentsMargins(0, 0, 3, 0)
self.setSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Maximum)
self.point1 = None
self.point2 = None
self.label = QtGui.QLabel("Click on a reference point ...")
self.label.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)
self.label.setMargin(3)
self.layout.addWidget(self.label)
# self.layout.setMargin(0)
self.setVisible(False)
self.click_subscription = None
self.move_subscription = None
def install(self):
FlatCAMTool.install(self)
self.app.ui.right_layout.addWidget(self)
self.app.plotcanvas.mpl_connect('key_press_event', self.on_key_press)
def run(self):
self.toggle()
def on_click(self, event):
if self.point1 is None:
self.point1 = (event.xdata, event.ydata)
else:
self.point2 = copy(self.point1)
self.point1 = (event.xdata, event.ydata)
self.on_move(event)
def on_key_press(self, event):
if event.key == 'm':
self.toggle()
def toggle(self):
if self.isVisible():
self.setVisible(False)
self.app.plotcanvas.mpl_disconnect(self.move_subscription)
self.app.plotcanvas.mpl_disconnect(self.click_subscription)
else:
self.setVisible(True)
self.move_subscription = self.app.plotcanvas.mpl_connect('motion_notify_event', self.on_move)
self.click_subscription = self.app.plotcanvas.mpl_connect('button_press_event', self.on_click)
def on_move(self, event):
if self.point1 is None:
self.label.setText("Click on a reference point...")
else:
try:
dx = event.xdata - self.point1[0]
dy = event.ydata - self.point1[1]
d = sqrt(dx**2 + dy**2)
self.label.setText("D = %.4f D(x) = %.4f D(y) = %.4f" % (d, dx, dy))
except TypeError:
pass
if self.update is not None:
self.update()
|
silasb/flatcam
|
FlatCAMTool.py
|
Python
|
mit
| 8,497
|
import os
import time
import threading
import warnings
from django.conf import settings
from django.db import connections
from django.dispatch import receiver, Signal
from django.utils import timezone
from django.utils.functional import empty
template_rendered = Signal(providing_args=["template", "context"])
setting_changed = Signal(providing_args=["setting", "value", "enter"])
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
# Settings that may not work well when using 'override_settings' (#19031)
COMPLEX_OVERRIDE_SETTINGS = set(['DATABASES'])
@receiver(setting_changed)
def clear_cache_handlers(**kwargs):
if kwargs['setting'] == 'CACHES':
from django.core.cache import caches
caches._caches = threading.local()
@receiver(setting_changed)
def update_installed_apps(**kwargs):
if kwargs['setting'] == 'INSTALLED_APPS':
# Rebuild any AppDirectoriesFinder instance.
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
# Rebuild app_template_dirs cache.
from django.template.loaders import app_directories as mod
mod.app_template_dirs = mod.calculate_app_template_dirs()
# Rebuild templatetags module cache.
from django.template import base
base.templatetags_modules[:] = []
# Rebuild management commands cache
from django.core.management import get_commands
get_commands.cache_clear()
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
    """Propagate TIME_ZONE / USE_TZ overrides to the process and the DBs."""
    if kwargs['setting'] == 'TIME_ZONE':
        # Reset process time zone
        if hasattr(time, 'tzset'):
            if kwargs['value']:
                os.environ['TZ'] = kwargs['value']
            else:
                os.environ.pop('TZ', None)
            time.tzset()
        # Reset local time zone cache
        timezone._localtime = None
    # Reset the database connections' time zone.
    # Only one of the two settings changed in this signal; read the other
    # from the (already updated) settings object.
    if kwargs['setting'] == 'USE_TZ' and settings.TIME_ZONE != 'UTC':
        USE_TZ, TIME_ZONE = kwargs['value'], settings.TIME_ZONE
    elif kwargs['setting'] == 'TIME_ZONE' and not settings.USE_TZ:
        USE_TZ, TIME_ZONE = settings.USE_TZ, kwargs['value']
    else:
        # no need to change the database connections' time zones
        return
    tz = 'UTC' if USE_TZ else TIME_ZONE
    for conn in connections.all():
        conn.settings_dict['TIME_ZONE'] = tz
        tz_sql = conn.ops.set_time_zone_sql()
        if tz_sql:
            conn.cursor().execute(tz_sql, [tz])
@receiver(setting_changed)
def clear_context_processors_cache(**kwargs):
    """Drop cached context processors when TEMPLATE_CONTEXT_PROCESSORS changes."""
    if kwargs['setting'] != 'TEMPLATE_CONTEXT_PROCESSORS':
        return
    from django.template import context
    context._standard_context_processors = None


@receiver(setting_changed)
def clear_template_loaders_cache(**kwargs):
    """Drop cached template loaders when TEMPLATE_LOADERS changes."""
    if kwargs['setting'] != 'TEMPLATE_LOADERS':
        return
    from django.template import loader
    loader.template_source_loaders = None


@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
    """Drop the cached serializer registry when SERIALIZATION_MODULES changes."""
    if kwargs['setting'] != 'SERIALIZATION_MODULES':
        return
    from django.core import serializers
    serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
    """Reset translation caches when locale-related settings change."""
    changed = kwargs['setting']
    if changed not in ('LOCALE_PATHS', 'LANGUAGE_CODE'):
        return
    from django.utils.translation import trans_real
    trans_real._default = None
    if changed == 'LOCALE_PATHS':
        trans_real._translations = {}


@receiver(setting_changed)
def file_storage_changed(**kwargs):
    """Force re-instantiation of the lazily wrapped default storage backend."""
    if kwargs['setting'] not in ('MEDIA_ROOT', 'DEFAULT_FILE_STORAGE'):
        return
    from django.core.files.storage import default_storage
    default_storage._wrapped = empty


@receiver(setting_changed)
def complex_setting_changed(**kwargs):
    """Warn on overrides of settings known to misbehave with override_settings."""
    if not kwargs['enter']:
        return
    if kwargs['setting'] not in COMPLEX_OVERRIDE_SETTINGS:
        return
    # Considering the current implementation of the signals framework,
    # stacklevel=5 shows the line containing the override_settings call.
    warnings.warn("Overriding setting %s can lead to unexpected behaviour."
                  % kwargs['setting'], stacklevel=5)
|
Beeblio/django
|
django/test/signals.py
|
Python
|
bsd-3-clause
| 4,210
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import argparse
import base64
import functools
import hashlib
import inspect
import os
import sys
from google.protobuf.empty_pb2 import Empty
import grpc
from grpc import (composite_channel_credentials, metadata_call_credentials,
ssl_channel_credentials)
from twisted.internet import reactor
from xosconfig import Config
from xosapi import orm
import xosapi.chameleon_client.grpc_client as chameleon_client
from multistructlog import create_logger
# Structured logger configured from the "logging" section of the XOS config.
log = create_logger(Config().get("logging"))
# Prepend this module's own directory to sys.path — presumably so modules
# that live next to this file win import resolution; TODO confirm.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path = [currentdir] + sys.path
# Default CA bundle used by SecureClient to verify the core's certificate.
SERVER_CA = "/usr/local/share/ca-certificates/local_certs.crt"
class UsernamePasswordCallCredentials(grpc.AuthMetadataPlugin):
    """Attach an HTTP basic-auth header to every gRPC call.

    Implements the grpc.AuthMetadataPlugin interface: the runtime invokes
    __call__ for each request and the extra metadata is handed to *callback*.
    """

    def __init__(self, username, password):
        self._username = username
        self._password = password

    def __call__(self, context, callback):
        # base64.b64encode operates on bytes: encode the credentials first and
        # decode the result so the header value is a text string on both
        # Python 2 and 3 (passing a str raised TypeError on Py3 before).
        raw = ("%s:%s" % (self._username, self._password)).encode("utf-8")
        basic_auth = "Basic %s" % base64.b64encode(raw).decode("ascii")
        metadata = (("authorization", basic_auth),)
        callback(metadata, None)
class SessionIdCallCredentials(grpc.AuthMetadataPlugin):
    """Attach the XOS session-id header to every gRPC call."""

    def __init__(self, sessionid):
        self._sessionid = sessionid

    def __call__(self, context, callback):
        callback((("x-xossession", self._sessionid),), None)
class XOSClient(chameleon_client.GrpcClient):
    """gRPC client for the XOS core.

    Layers XOS specifics on top of the chameleon GrpcClient: after every
    (re)connect it loads the generated protobuf stubs, rebuilds the ORM,
    downloads convenience methods from the core, and (optionally) restarts
    the synchronizer process when the protobufs change.  A second-level
    reconnect callback is kept so the ORM is ready before callers are told.
    """

    def __init__(self, *args, **kwargs):
        # filename -> sha256 hexdigest of the protobuf file last seen
        self.hashes = {}
        self.restart_on_protobuf_change = False
        super(XOSClient, self).__init__(*args, **kwargs)

    def set_reconnect_callback(self, reconnect_callback):
        """Register a callback invoked after the ORM has been (re)built."""
        self.reconnect_callback2 = reconnect_callback
        return self

    def request_convenience_methods(self):
        """Download ORM convenience methods from the core and save them locally.

        When the core reports nothing or is unavailable, the synchronizer
        process is re-exec'ed so it can retry from a clean slate.
        """
        convenience_methods_dir = "/var/run/xosapi/convenience"
        if not os.path.exists(convenience_methods_dir):
            log.info("Creating convenience methods directory",
                     convenience_methods_dir=convenience_methods_dir)
            os.makedirs(convenience_methods_dir)
        try:
            response = self.dynamicload.GetConvenienceMethods(Empty())
            if response:
                log.info(
                    "Saving convenience methods",
                    methods=[m.filename for m in response.convenience_methods],
                )
                for cm in response.convenience_methods:
                    log.debug("Saving convenience method", method=cm.filename)
                    save_path = os.path.join(convenience_methods_dir, cm.filename)
                    # Close the file promptly instead of leaking the handle.
                    with open(save_path, "w") as method_file:
                        method_file.write(cm.contents)
            else:
                log.exception(
                    "Cannot load convenience methods, restarting the synchronizer"
                )
                os.execv(sys.executable, ["python"] + sys.argv)
        except grpc._channel._Rendezvous as e:
            code = e.code()
            if code == grpc.StatusCode.UNAVAILABLE:
                # NOTE if the core is not available, restart the synchronizer
                os.execv(sys.executable, ["python"] + sys.argv)

    def hash_check(self, pb2_file_name, pb2_grpc_file_name):
        """Restart the synchronizer if the generated protobuf files changed.

        If the protobufs have changed, it's likely that new models have been
        downloaded.  Rather than force a reload() of the affected modules,
        restart the whole synchronizer so it re-runs its version check
        against the core and refreshes any state built from the models.
        """
        # Store and compare hexdigest strings: two hashlib objects never
        # compare equal (no __eq__), so the previous object comparison
        # always signalled a change.  Read as bytes (required on Py3) and
        # close the files deterministically.
        with open(pb2_file_name, "rb") as pb2_file:
            pb2_hash = hashlib.sha256(pb2_file.read()).hexdigest()
        with open(pb2_grpc_file_name, "rb") as pb2_grpc_file:
            pb2_grpc_hash = hashlib.sha256(pb2_grpc_file.read()).hexdigest()

        # Only compare when both files have been seen before; the previous
        # "or" membership test could KeyError when just one was known.
        if (pb2_file_name in self.hashes) and (pb2_grpc_file_name in self.hashes):
            if (pb2_hash != self.hashes[pb2_file_name]) or \
                    (pb2_grpc_hash != self.hashes[pb2_grpc_file_name]):
                log.warning("Protobuf change detected, restarting the synchronizer")
                os.execv(sys.executable, ["python"] + sys.argv)

        self.hashes[pb2_file_name] = pb2_hash
        self.hashes[pb2_grpc_file_name] = pb2_grpc_hash

    def reconnected(self):
        """(Re)load the generated stubs and rebuild the ORM after a connect."""
        for api in ["modeldefs", "utility", "xos", "dynamicload"]:
            pb2_file_name = os.path.join(self.work_dir, api + "_pb2.py")
            pb2_grpc_file_name = os.path.join(self.work_dir, api + "_pb2_grpc.py")
            if os.path.exists(pb2_file_name) and os.path.exists(pb2_grpc_file_name):
                if self.restart_on_protobuf_change:
                    self.hash_check(pb2_file_name, pb2_grpc_file_name)
                # Temporarily extend sys.path so the generated modules in
                # work_dir can be imported, then restore it.
                orig_sys_path = sys.path
                try:
                    sys.path.append(self.work_dir)
                    m_protos = __import__(api + "_pb2")
                    m_grpc = __import__(api + "_pb2_grpc")
                finally:
                    sys.path = orig_sys_path
                stub_class = getattr(m_grpc, api + "Stub")
                setattr(self, api, stub_class(self.channel))
                setattr(self, api + "_pb2", m_protos)
            else:
                print("failed to locate api", api, file=sys.stderr)
        if hasattr(self, "xos"):
            self.xos_orm = orm.ORMStub(self.xos, self.xos_pb2, "xos")
        # ask the core for the convenience methods
        self.request_convenience_methods()
        # Load convenience methods after reconnect
        orm.import_convenience_methods()
        if self.reconnect_callback2:
            self.reconnect_callback2()
class InsecureClient(XOSClient):
    """XOSClient over a plaintext (non-TLS) channel with no call credentials."""

    def __init__(
        self,
        consul_endpoint=None,
        work_dir="/tmp/xos_grpc_protos",
        endpoint="localhost:50055",
        reconnect_callback=None,
    ):
        super(InsecureClient, self).__init__(
            consul_endpoint, work_dir, endpoint, self.reconnected
        )
        # Second-level callback fired once the ORM is ready (see XOSClient).
        self.reconnect_callback2 = reconnect_callback
class SecureClient(XOSClient):
    """XOSClient over a TLS channel authenticated by session id or user/password.

    The server certificate is verified against *cacert*.  When *sessionid*
    is given it takes precedence over *username*/*password*.
    """

    def __init__(
        self,
        consul_endpoint=None,
        work_dir="/tmp/xos_grpc_protos",
        endpoint="localhost:50055",
        reconnect_callback=None,
        cacert=SERVER_CA,
        username=None,
        password=None,
        sessionid=None,
    ):
        # Read the CA bundle as bytes (grpc expects bytes on Python 3) and
        # close the file promptly instead of leaking the handle.
        with open(cacert, "rb") as cacert_file:
            server_ca = cacert_file.read()

        if sessionid:
            call_creds = metadata_call_credentials(SessionIdCallCredentials(sessionid))
        else:
            call_creds = metadata_call_credentials(
                UsernamePasswordCallCredentials(username, password)
            )
        chan_creds = ssl_channel_credentials(server_ca)
        chan_creds = composite_channel_credentials(chan_creds, call_creds)
        super(SecureClient, self).__init__(
            consul_endpoint, work_dir, endpoint, self.reconnected, chan_creds
        )
        # Second-level callback fired once the ORM is ready (see XOSClient).
        self.reconnect_callback2 = reconnect_callback
# -----------------------------------------------------------------------------
# Wrappers for easy setup for test cases, etc
# -----------------------------------------------------------------------------
def parse_args():
    """Parse the command-line options used by the self-test entry points.

    Returns:
        argparse.Namespace carrying the config path, gRPC endpoints,
        credentials, and verbosity flags.
    """
    parser = argparse.ArgumentParser()

    defs = {
        "grpc_insecure_endpoint": "xos-core.cord.lab:50055",
        "grpc_secure_endpoint": "xos-core.cord.lab:50051",
        "config": "/opt/xos/config.yml",
    }

    _help = "Path to the config file (default: %s)" % defs["config"]
    parser.add_argument(
        "-C",
        "--config",
        dest="config",
        action="store",
        default=defs["config"],
        help=_help,
    )

    # These two help texts were previously tuples (stray trailing commas
    # around the parenthesized strings), which argparse rendered as a tuple
    # repr; use plain strings instead.
    _help = "gRPC insecure end-point to connect to (default: %s)" % defs[
        "grpc_insecure_endpoint"
    ]
    parser.add_argument(
        "-G",
        "--grpc-insecure-endpoint",
        dest="grpc_insecure_endpoint",
        action="store",
        default=defs["grpc_insecure_endpoint"],
        help=_help,
    )

    _help = "gRPC secure end-point to connect to (default: %s)" % defs[
        "grpc_secure_endpoint"
    ]
    parser.add_argument(
        "-S",
        "--grpc-secure-endpoint",
        dest="grpc_secure_endpoint",
        action="store",
        default=defs["grpc_secure_endpoint"],
        help=_help,
    )

    # -u/-p previously reused the stale help text from the preceding option;
    # give them their own descriptions.
    parser.add_argument(
        "-u", "--username", dest="username", action="store", default=None,
        help="Username used to authenticate with the XOS core",
    )
    parser.add_argument(
        "-p", "--password", dest="password", action="store", default=None,
        help="Password used to authenticate with the XOS core",
    )

    _help = "omit startup banner log lines"
    parser.add_argument(
        "-n",
        "--no-banner",
        dest="no_banner",
        action="store_true",
        default=False,
        help=_help,
    )

    _help = "suppress debug and info logs"
    parser.add_argument("-q", "--quiet", dest="quiet", action="count", help=_help)

    _help = "enable verbose logging"
    parser.add_argument("-v", "--verbose", dest="verbose", action="count", help=_help)

    args = parser.parse_args()
    return args
def coreclient_reconnect(client, reconnect_callback, *args, **kwargs):
    """Internal reconnect hook: publish the ORM and stop the reactor.

    Exposes the connected client's ORM as the module-global *coreapi*, then
    invokes the caller-supplied callback and stops the reactor so
    start_api() returns.
    """
    global coreapi
    # Use the client handed to us instead of reaching for the module-level
    # global; they are the same object when wired up by start_api(), but
    # this keeps the function self-contained.
    coreapi = client.xos_orm
    if reconnect_callback:
        reconnect_callback(*args, **kwargs)
    reactor.stop()
def start_api(reconnect_callback, *args, **kwargs):
    """Create an XOS client, connect, and run the reactor until connected.

    A SecureClient is used when a username is supplied, otherwise an
    InsecureClient.  coreclient_reconnect stops the reactor once the ORM is
    ready, so this call blocks only until the (re)connection completes.
    """
    global coreclient
    if kwargs.get("username", None):
        coreclient = SecureClient(*args, **kwargs)
    else:
        coreclient = InsecureClient(*args, **kwargs)
    coreclient.set_reconnect_callback(
        functools.partial(coreclient_reconnect, coreclient, reconnect_callback)
    )
    coreclient.start()
    reactor.run()
def start_api_parseargs(reconnect_callback):
    """ This function is an entrypoint for tests and other simple programs to
        setup the API and get a callback when the API is ready.
    """
    args = parse_args()
    # Credentials on the command line imply the secure endpoint.
    if args.username:
        start_api(
            reconnect_callback,
            endpoint=args.grpc_secure_endpoint,
            username=args.username,
            password=args.password,
        )
    else:
        start_api(reconnect_callback, endpoint=args.grpc_insecure_endpoint)
# -----------------------------------------------------------------------------
# Self test
# -----------------------------------------------------------------------------
def insecure_callback(client):
    """Self-test step 1: exercise the insecure client, then chain step 2."""
    print("insecure self_test start")
    print(client.xos_orm.User.objects.all())
    print("insecure self_test done")
    # now start the next test
    client.stop()
    reactor.callLater(0, start_secure_test)
def start_insecure_test():
    # Connect without TLS; insecure_callback runs once the ORM is ready.
    client = InsecureClient(endpoint="xos-core:50055")
    client.set_reconnect_callback(functools.partial(insecure_callback, client))
    client.start()
def secure_callback(client):
    """Self-test step 2: exercise the secure client, then stop the reactor."""
    print("secure self_test start")
    print(client.xos_orm.User.objects.all())
    print("secure self_test done")
    reactor.stop()
def start_secure_test():
    # Connect with TLS using the built-in demo credentials.
    client = SecureClient(
        endpoint="xos-core:50051", username="admin@opencord.org", password="letmein"
    )
    client.set_reconnect_callback(functools.partial(secure_callback, client))
    client.start()
def main():
    # Run the insecure test first; it chains into the secure test and
    # finally stops the reactor.
    reactor.callLater(0, start_insecure_test)
    reactor.run()
if __name__ == "__main__":
    main()
|
opencord/xos
|
lib/xos-api/xosapi/xos_grpc_client.py
|
Python
|
apache-2.0
| 12,268
|
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""B2SHARE"""
from __future__ import absolute_import, print_function
from .ext import B2ShareFiles
# NOTE: the trailing comma is required — ('B2ShareFiles') without it is just
# a parenthesized string, not a one-element tuple.
__all__ = ('B2ShareFiles',)
|
emanueldima/b2share
|
b2share/modules/files/__init__.py
|
Python
|
gpl-2.0
| 908
|
from django.conf.urls import url,include
from django.contrib import admin
# URL routing table; Django tests patterns in order and uses the first match.
urlpatterns = [
    # NOTE(review): r'' matches every request path, so the patterns listed
    # after this one may only ever be reached through this include's own
    # sub-patterns — confirm this ordering is intended.
    url(r'',include('Register_and_login.urls')),
    url(r'^homepage/',include('MainPage.urls')),
    url(r'^username/cart/',include('cart.urls')),
    url(r'^username/',include('customer.urls')),
    url(r'^pharmacy_name/',include('pharmacy.urls')),
    url(r'^pharmacy_name/inventory',include('inventory.urls')),
    url(r'^search/all_search=pcm',include('items.urls')),
    url(r'^$', include('order.urls')),
    url(r'^admin/', admin.site.urls)
]
|
mpiplani/Online-Pharmacy
|
online_pharmacy/online_pharmacy/online_pharmacy/urls.py
|
Python
|
apache-2.0
| 542
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/router -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_router
short_description: Module to manage openshift router
description:
- Manage openshift router programmatically.
options:
state:
description:
- Whether to create or delete the router
- present - create the router
- absent - remove the router
- list - return the current representation of a router
required: false
default: present
choices:
- present
- absent
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the router
required: false
default: router
aliases: []
namespace:
description:
- The namespace where to manage the router.
required: false
default: default
aliases: []
images:
description:
- The image to base this router on - ${component} will be replaced with --type
required: 'openshift3/ose-${component}:${version}'
default: None
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
ports:
description:
- A list of strings in the 'port:port' format
required: False
default:
- 80:80
- 443:443
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run routers on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the router pod.
required: False
default: router
aliases: []
router_type:
description:
- The router image to use - if you specify --images this flag may be ignored.
required: false
default: haproxy-router
aliases: []
external_host:
description:
- If the underlying router implementation connects with an external host, this is the external host's hostname.
required: false
default: None
aliases: []
external_host_vserver:
description:
- If the underlying router implementation uses virtual servers, this is the name of the virtual server for HTTP connections.
required: false
default: None
aliases: []
external_host_insecure:
description:
- If the underlying router implementation connects with an external host
- over a secure connection, this causes the router to skip strict certificate verification with the external host.
required: false
default: False
aliases: []
external_host_partition_path:
description:
- If the underlying router implementation uses partitions for control boundaries, this is the path to use for that partition.
required: false
default: None
aliases: []
external_host_username:
description:
- If the underlying router implementation connects with an external host, this is the username for authenticating with the external host.
required: false
default: None
aliases: []
external_host_password:
description:
- If the underlying router implementation connects with an external host, this is the password for authenticating with the external host.
required: false
default: None
aliases: []
external_host_private_key:
description:
- If the underlying router implementation requires an SSH private key, this is the path to the private key file.
required: false
default: None
aliases: []
expose_metrics:
description:
- This is a hint to run an extra container in the pod to expose metrics - the image
- will either be set depending on the router implementation or provided with --metrics-image.
required: false
default: False
aliases: []
metrics_image:
description:
- If expose_metrics is specified this is the image to use to run a sidecar container
- in the pod exposing metrics. If not set and --expose-metrics is true the image will
- depend on router implementation.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment:
- There are some exceptions to note when doing the idempotency in this module.
- The strategy is to use the oc adm router command to generate a default
- configuration when creating or updating a router. Often times there
- differences from the generated template and what is in memory in openshift.
- We make exceptions to not check these specific values when comparing objects.
- Here are a list of exceptions:
- - DeploymentConfig:
- dnsPolicy
- terminationGracePeriodSeconds
- restartPolicy
- timeoutSeconds
- livenessProbe
- readinessProbe
- terminationMessagePath
- hostPort
- defaultMode
- Service:
- portalIP
- clusterIP
- sessionAffinity
- type
- ServiceAccount:
- secrets
- imagePullSecrets
'''
EXAMPLES = '''
- name: create routers
oc_adm_router:
name: router
service_account: router
replicas: 2
namespace: default
selector: type=infra
cert_file: /etc/origin/master/named_certificates/router.crt
key_file: /etc/origin/master/named_certificates/router.key
cacert_file: /etc/origin/master/named_certificates/router.ca
edits:
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: put
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: put
- key: spec.template.spec.containers[0].env
value:
name: EXTENDED_VALIDATION
value: 'false'
action: update
register: router_out
run_once: True
'''
# -*- -*- -*- End included fragment: doc/router -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    '''Error type raised by Yedit for load, write, and key-path failures.'''
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        '''Build a Yedit around *filename* and/or pre-supplied *content*.

        The document is parsed immediately via load(); an empty document
        becomes an empty dict.  *separator* is the character that splits
        key paths (e.g. 'a.b.c'); *backup* requests a .orig copy of the
        file on write().
        '''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        '''Key-path separator character (defaults to ".").'''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        '''Set the key-path separator character.'''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        '''The parsed document (dict or list).'''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        '''Replace the parsed document wholesale.'''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        '''Delete the value at key path *key* inside *data*, in place.

        An empty key clears the whole container.  Returns True on removal,
        None when the path does not resolve to anything removable.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        # Walk everything but the final path component to reach the parent.
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        '''Set *item* at key path *key* inside *data*, creating intermediate
        dicts as needed.

        Example: add_entry({'a': {}}, 'a.b', 'c') stores 'c' at a.b.
        Returns the value written, or None for an invalid key; raises
        YeditException when the existing structure conflicts with the path.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        # Walk everything but the final component, creating missing dicts.
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
    def write(self):
        '''Serialize yaml_dict to self.filename; return (True, yaml_dict).

        Raises YeditException when no filename is set.  When backup is
        enabled and the file exists, a copy is first saved as
        <filename>.orig.
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # Try to set format attributes if supported (ruamel.yaml exposes
        # .fa; plain PyYAML objects raise AttributeError).
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported; fall back to PyYAML's
        # safe_dump when the ruamel-only API is unavailable.
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

        return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
    def load(self, content_type='yaml'):
        '''Parse the file and/or self.content into yaml_dict and return it.

        A dict supplied via self.content is used verbatim; a string
        overrides the file contents.  Raises YeditException on YAML parse
        errors.
        '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported (ruamel-only).
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

                # Try to use RoundTripLoader if supported (ruamel); plain
                # PyYAML's safe_load takes no Loader and raises AttributeError.
                try:
                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
    def pop(self, path, key_or_item):
        '''Remove *key_or_item* from the dict or list found at *path*.

        Returns (changed, yaml_dict); changed is False when the path or the
        key/item is not present.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
    def exists(self, path, value):
        '''Check whether *value* exists at key path *path*.

        For a list entry: membership test.  For a dict entry with a dict
        value: every key/value pair of *value* must match.  Otherwise a
        plain containment or equality check.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # for/else: the else branch only runs when no break fired,
                # i.e. every pair matched.
                # NOTE(review): entry[key] raises KeyError when *value* has
                # a key absent from entry — confirm callers rely on that.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value
    def append(self, path, value):
        '''Append *value* to the list at *path*, creating the list if needed.

        Returns (changed, yaml_dict); changed is False when *path* resolves
        to a non-list.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            # Path missing: create an empty list there first.
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        '''Merge *value* into the dict at *path*, or update/insert it into
        the list at *path*.

        For lists, the slot is located by *curr_value* (match existing
        element) or *index*; otherwise the value is appended when absent.
        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' put path, value into a dict

        Works on a copy of the document and only commits it when the
        write actually succeeds.  Returns a (changed, yaml_dict) tuple.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        # no-op when the document already holds this exact value
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        # (round-tripping through ruamel preserves comments/format info
        # that copy.deepcopy would lose on ruamel documents)
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)
        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)
        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we enapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Raised for errors originating in the OpenShiftCLI wrapper.'''
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    '''Find and return the path to the oc binary.

    Falls back to the bare name 'oc' when no candidate is found.
    '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
    binary = 'oc'
    # Use shutil.which if it is available, otherwise fallback to a naive path search
    try:
        located = shutil.which(binary, path=os.pathsep.join(search_paths))
    except AttributeError:
        # Python 2 has no shutil.which; scan the candidate dirs by hand.
        for directory in search_paths:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                return candidate
        return binary
    return located if located is not None else binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools

    Thin wrapper around the `oc` binary: every public-ish helper builds
    an argv list and funnels it through openshift_cmd(), which runs the
    subprocess with KUBECONFIG pointing at a private temp copy of the
    admin kubeconfig.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # work from a private temp copy so the shared admin kubeconfig
        # is never mutated by this run
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content

        Fetches the live object, applies the key/value pairs from
        *content* via Yedit, and only calls `oc replace` when at least
        one value actually changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])
    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource

        Exactly one of *name* or *selector* must be provided;
        raises OpenShiftCLIError otherwise.
        '''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template
        template_name: the name of the template to process
        create: whether to send to oc create after processing
        params: the parameters for the template
        template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # template body is piped in on stdin
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['create', '-f', fname])
    def _get(self, resource, name=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        cmd.extend(['-o', 'json'])
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        cmd.append('--schedulable={}'.format(schedulable))
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods
        node: the node in which to list pods
        selector: the label selector filter if provided
        pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))
        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        # point the subprocess at our private kubeconfig copy
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)
        stdout, stderr = proc.communicate(input_data)
        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Builds the final argv (binary, optional 'adm', namespace flags),
        runs it, and returns a dict with returncode/cmd/results, plus
        stdout/stderr on error.
        '''
        cmds = [self.oc_binary]
        if oadm:
            cmds.append('adm')
        cmds.extend(cmd)
        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE(review): 'emtpy' is a long-standing typo that is now
        # load-bearing -- namespaces literally named 'emtpy' (or 'none')
        # suppress the -n flag.  Do not "fix" the string.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])
        if self.verbose:
            print(' '.join(cmds))
        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}
        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''
        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))
        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})
        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules

    Static helpers: temp-file management, result-list searching,
    `oc version` output parsing, and a recursive definition comparison
    used for idempotency checks.
    '''
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(contents)

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        tmp = Utils.create_tmpfile(prefix=rname)
        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        # use a context manager so the source handle is closed promptly
        # (the original `open(inc_file).read()` leaked the file object)
        with open(inc_file) as src:
            Utils._write(tmpfile, src.read())
        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break
        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']
        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]
        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]
        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''
        versions_dict = {}
        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]
            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = version[1:4]
        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import rpm
        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
        return rpmquery.count() > 0

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Recursively compares lists and dicts, ignoring the autogenerated
        'metadata' and 'status' keys plus any caller-supplied skip_keys.
        Returns True when the definitions are considered equal.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False
            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False
        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic holder for oc CLI option dictionaries.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        '''Return the raw options mapping.'''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for option in sorted(self.config_options.keys()):
            info = self.config_options[option]
            # include only options flagged for inclusion whose value is
            # set (0/False count as set because they are ints)
            if not info['include']:
                continue
            if info['value'] is None and not isinstance(info['value'], int):
                continue
            if option == ascommalist:
                formatted = ','.join('{}={}'.format(key, val)
                                     for key, val in sorted(info['value'].items()))
            else:
                formatted = info['value']
            params.append('--{}={}'.format(option.replace('_', '-'), formatted))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
    '''Builds the dict representation of an OpenShift Service object.'''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 ports,
                 annotations=None,
                 selector=None,
                 labels=None,
                 cluster_ip=None,
                 portal_ip=None,
                 session_affinity=None,
                 service_type=None,
                 external_ips=None):
        ''' constructor for handling service options '''
        self.name = sname
        self.namespace = namespace
        self.ports = ports
        self.annotations = annotations
        self.selector = selector
        self.labels = labels
        self.cluster_ip = cluster_ip
        self.portal_ip = portal_ip
        self.session_affinity = session_affinity
        self.service_type = service_type
        self.external_ips = external_ips
        self.data = {}
        self.create_dict()

    def create_dict(self):
        '''Populate self.data with the v1 Service structure.'''
        metadata = {'name': self.name, 'namespace': self.namespace}
        if self.labels:
            # copy labels key-by-key into a fresh dict
            metadata['labels'] = dict(self.labels.items())
        if self.annotations:
            metadata['annotations'] = self.annotations
        spec = {'ports': self.ports if self.ports else [],
                'sessionAffinity': self.session_affinity or 'None'}
        if self.selector:
            spec['selector'] = self.selector
        if self.cluster_ip:
            spec['clusterIP'] = self.cluster_ip
        if self.portal_ip:
            spec['portalIP'] = self.portal_ip
        if self.service_type:
            spec['type'] = self.service_type
        if self.external_ips:
            spec['externalIPs'] = self.external_ips
        # mutate (not rebind) self.data so existing references stay valid
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'Service'
        self.data['metadata'] = metadata
        self.data['spec'] = spec
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
    ''' Class to model the oc service object '''
    port_path = "spec.ports"
    portal_ip = "spec.portalIP"
    cluster_ip = "spec.clusterIP"
    selector_path = 'spec.selector'
    kind = 'Service'
    external_ips = "spec.externalIPs"

    def __init__(self, content):
        '''Service constructor'''
        super(Service, self).__init__(content=content)

    def get_ports(self):
        '''Return the port list, or [] when none are defined.'''
        return self.get(Service.port_path) or []

    def get_selector(self):
        '''Return the selector mapping, or {} when none is defined.'''
        return self.get(Service.selector_path) or {}

    def add_ports(self, inc_ports):
        '''Append one port dict (or a list of them) to the service.'''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]
        existing = self.get_ports()
        if existing:
            existing.extend(inc_ports)
        else:
            self.put(Service.port_path, inc_ports)
        return True

    def find_ports(self, inc_port):
        '''Return the existing entry matching inc_port['port'], else None.'''
        return next((port for port in self.get_ports()
                     if port['port'] == inc_port['port']), None)

    def delete_ports(self, inc_ports):
        '''Remove matching ports; True when anything was removed
        (or when there were no ports to begin with).'''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]
        existing = self.get(Service.port_path) or []
        if not existing:
            return True
        removed = False
        for inc_port in inc_ports:
            match = self.find_ports(inc_port)
            if match:
                existing.remove(match)
                removed = True
        return removed

    def add_cluster_ip(self, sip):
        '''Set spec.clusterIP.'''
        self.put(Service.cluster_ip, sip)

    def add_portal_ip(self, pip):
        '''Set spec.portalIP.'''
        self.put(Service.portal_ip, pip)

    def get_external_ips(self):
        '''Return the externalIPs list, or [] when none are defined.'''
        return self.get(Service.external_ips) or []

    def add_external_ips(self, inc_external_ips):
        '''Append one external IP (or a list of them) to the service.'''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]
        existing = self.get_external_ips()
        if existing:
            existing.extend(inc_external_ips)
        else:
            self.put(Service.external_ips, inc_external_ips)
        return True

    def find_external_ips(self, inc_external_ip):
        '''Return the stored value equal to inc_external_ip, else None.'''
        return next((ext_ip for ext_ip in self.get_external_ips()
                     if ext_ip == inc_external_ip), None)

    def delete_external_ips(self, inc_external_ips):
        '''Remove matching external IPs; True when anything was removed
        (or when there were none to begin with).'''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]
        existing = self.get(Service.external_ips) or []
        if not existing:
            return True
        removed = False
        for inc_external_ip in inc_external_ips:
            match = self.find_external_ips(inc_external_ip)
            if match:
                existing.remove(match)
                removed = True
        return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
    def __init__(self, content=None):
        ''' Constructor for deploymentconfig

        Falls back to the canned default_deployment_config template when
        no content is supplied.
        '''
        if not content:
            content = DeploymentConfig.default_deployment_config
        super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
'''return a environment variables '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
'''return a environment variables '''
return self.get(DeploymentConfig.env_path) or []
def delete_env_var(self, keys):
'''delete a list of keys '''
if not isinstance(keys, list):
keys = [keys]
env_vars_array = self.get_env_vars()
modified = False
idx = None
for key in keys:
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
if idx:
modified = True
del env_vars_array[idx]
if modified:
return True
return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
if idx:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
'''return volume mount information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
del exist_volume_mounts[idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
    def update_replicas(self, replicas):
        '''Write ``replicas`` to ``DeploymentConfig.replicas_path``.'''
        self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
'''place an env in the env var list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
if update_idx != None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
'''place an env in the env var list'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
    def needs_update_volume(self, volume, volume_mount):
        '''Return True when the desired volume/mount differ from the existing ones.

        Builds a list of equality checks appropriate to the volume kind
        (secret / emptyDir / persistentVolumeClaim / hostpath) and returns
        True unless every check passed.

        NOTE(review): assumes the volume and mount already exist —
        find_volume_by_name returns None when absent, and indexing None below
        would raise TypeError.  Callers appear to guarantee existence; confirm.
        '''
        exist_volume = self.find_volume_by_name(volume)
        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
        results = []
        results.append(exist_volume['name'] == volume['name'])
        if 'secret' in volume:
            results.append('secret' in exist_volume)
            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
            results.append(exist_volume_mount['name'] == volume_mount['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
        elif 'emptyDir' in volume:
            results.append(exist_volume_mount['name'] == volume['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
        elif 'persistentVolumeClaim' in volume:
            pvc = 'persistentVolumeClaim'
            results.append(pvc in exist_volume)
            # only compare claim details when the existing volume is PVC-backed
            if results[-1]:
                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
                if 'claimSize' in volume[pvc]:
                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
        elif 'hostpath' in volume:
            # NOTE(review): checks lowercase 'hostpath' on the desired volume
            # but camelCase 'hostPath' on the existing one, and compares the
            # host path against the mount's mountPath — confirm which key and
            # value callers actually pass here.
            results.append('hostPath' in exist_volume)
            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
        return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
    '''Builds the dict representation of a v1 ServiceAccount.

    The resulting structure is stored on ``self.data``.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        '''Populate self.data with a v1 ServiceAccount structure.'''
        self.data = {
            'apiVersion': 'v1',
            'kind': 'ServiceAccount',
            'metadata': {
                'name': self.name,
                'namespace': self.namespace,
            },
            # each secret name is wrapped in the {"name": ...} reference form
            'secrets': [{"name": sec} for sec in self.secrets],
            'imagePullSecrets': [{"name": sec} for sec in self.image_pull_secrets],
        }
class ServiceAccount(Yedit):
    '''Wraps a ServiceAccount resource for the oc command line tools.

    Secrets and imagePullSecrets are lazily read from the underlying yaml
    and cached on the instance.
    '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor'''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' property for image_pull_secrets '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' setter for image_pull_secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' property for secrets '''
        # explicit None check (was truthiness): an empty list set via the
        # setter must not be silently refetched; also matches the
        # image_pull_secrets property above
        if self._secrets is None:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' setter for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        '''Remove the named secret; return True when something was removed.'''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break
        # compare against None: index 0 is falsy but still a valid match
        # (the original `if remove_idx:` skipped deleting the first entry)
        if remove_idx is not None:
            del self.secrets[remove_idx]
            return True
        return False

    def delete_image_pull_secret(self, inc_secret):
        '''Remove the named imagePullSecret; return True when removed.'''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break
        if remove_idx is not None:
            del self.image_pull_secrets[remove_idx]
            return True
        return False

    def find_secret(self, inc_secret):
        '''Return the secret entry with the given name, or None.'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret
        return None

    def find_image_pull_secret(self, inc_secret):
        '''Return the imagePullSecret entry with the given name, or None.'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret
        return None

    def add_secret(self, inc_secret):
        '''Append a secret reference, creating the list when absent.'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''Append an imagePullSecret reference, creating the list when absent.'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
    '''Builds the dict representation of a v1 Secret resource.'''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 kubeconfig,
                 secrets=None,
                 stype=None,
                 annotations=None):
        '''Store the secret options and build ``self.data``.'''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.type = stype
        self.namespace = namespace
        self.secrets = secrets
        self.annotations = annotations
        self.data = {}
        self.create_dict()

    def create_dict(self):
        '''Populate self.data with a v1 Secret structure.'''
        self.data = {
            'apiVersion': 'v1',
            'kind': 'Secret',
            'type': self.type,
            'metadata': {
                'name': self.name,
                'namespace': self.namespace,
            },
            # copy the provided key/value pairs into the data map
            'data': dict(self.secrets) if self.secrets else {},
        }
        if self.annotations:
            self.data['metadata']['annotations'] = self.annotations
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
    '''Wraps a Secret resource for the oc command line tools.

    The secret's "data" map is lazily read and cached on the instance.
    '''
    secret_path = "data"
    kind = 'secret'

    def __init__(self, content):
        '''secret constructor'''
        super(Secret, self).__init__(content=content)
        self._secrets = None

    @property
    def secrets(self):
        '''secret property getter'''
        if self._secrets is None:
            self._secrets = self.get_secrets()
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        '''secret property setter

        The original setter took no value argument, so any assignment to
        ``.secrets`` raised TypeError; accept and store the value instead.
        '''
        self._secrets = secrets

    def get_secrets(self):
        '''Return all of the defined secrets (the "data" map), or {}.'''
        return self.get(Secret.secret_path) or {}

    def add_secret(self, key, value):
        '''Add a secret key/value pair; returns True.'''
        if self.secrets:
            self.secrets[key] = value
        else:
            # no data map yet: create it with the single entry
            self.put(Secret.secret_path, {key: value})
        return True

    def delete_secret(self, key):
        '''Delete a secret by key; returns False when the key is absent.'''
        try:
            del self.secrets[key]
        except KeyError as _:
            return False
        return True

    def find_secret(self, key):
        '''Return {'key': key, 'value': value} for the key, or None.'''
        rval = None
        try:
            rval = self.secrets[key]
        except KeyError as _:
            return None
        return {'key': key, 'value': rval}

    def update_secret(self, key, value):
        '''Update (or add) a secret key/value pair; returns True.'''
        if key in self.secrets:
            self.secrets[key] = value
        else:
            self.add_secret(key, value)
        return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/rolebinding.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class RoleBindingConfig(object):
    ''' Handle rolebinding config '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 name,
                 namespace,
                 kubeconfig,
                 group_names=None,
                 role_ref=None,
                 subjects=None,
                 usernames=None):
        ''' constructor for handling rolebinding options '''
        self.kubeconfig = kubeconfig
        self.name = name
        self.namespace = namespace
        self.group_names = group_names
        self.role_ref = role_ref
        self.subjects = subjects
        self.usernames = usernames
        self.data = {}
        self.create_dict()

    def create_dict(self):
        '''Build a default v1 RoleBinding dict on ``self.data``.

        The metadata map is created before being populated; the original
        indexed ``self.data['metadata']['name']`` without creating
        ``self.data['metadata']`` first, raising KeyError on every
        construction.
        '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'RoleBinding'
        self.data['groupNames'] = self.group_names
        self.data['metadata'] = {}  # must exist before name/namespace are set
        self.data['metadata']['name'] = self.name
        self.data['metadata']['namespace'] = self.namespace
        self.data['roleRef'] = self.role_ref
        self.data['subjects'] = self.subjects
        self.data['userNames'] = self.usernames
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class RoleBinding(Yedit):
    ''' Class to model a rolebinding openshift object

    Subjects, roleRef, groupNames and userNames are lazily read from the
    underlying yaml (via Yedit.get) and cached on the instance.
    '''
    group_names_path = "groupNames"
    role_ref_path = "roleRef"
    subjects_path = "subjects"
    user_names_path = "userNames"
    kind = 'RoleBinding'
    def __init__(self, content):
        '''RoleBinding constructor'''
        super(RoleBinding, self).__init__(content=content)
        self._subjects = None
        self._role_ref = None
        self._group_names = None
        self._user_names = None
    @property
    def subjects(self):
        ''' subjects property (lazily loaded, cached) '''
        if self._subjects is None:
            self._subjects = self.get_subjects()
        return self._subjects
    @subjects.setter
    def subjects(self, data):
        ''' subjects property setter'''
        self._subjects = data
    @property
    def role_ref(self):
        ''' role_ref property (lazily loaded, cached) '''
        if self._role_ref is None:
            self._role_ref = self.get_role_ref()
        return self._role_ref
    @role_ref.setter
    def role_ref(self, data):
        ''' role_ref property setter'''
        self._role_ref = data
    @property
    def group_names(self):
        ''' group_names property (lazily loaded, cached) '''
        if self._group_names is None:
            self._group_names = self.get_group_names()
        return self._group_names
    @group_names.setter
    def group_names(self, data):
        ''' group_names property setter'''
        self._group_names = data
    @property
    def user_names(self):
        ''' user_names property (lazily loaded, cached) '''
        if self._user_names is None:
            self._user_names = self.get_user_names()
        return self._user_names
    @user_names.setter
    def user_names(self, data):
        ''' user_names property setter'''
        self._user_names = data
    def get_group_names(self):
        ''' return groupNames from the yaml, or [] when absent '''
        return self.get(RoleBinding.group_names_path) or []
    def get_user_names(self):
        ''' return userNames from the yaml, or [] when absent '''
        return self.get(RoleBinding.user_names_path) or []
    def get_role_ref(self):
        ''' return roleRef from the yaml, or {} when absent '''
        return self.get(RoleBinding.role_ref_path) or {}
    def get_subjects(self):
        ''' return subjects from the yaml, or [] when absent '''
        return self.get(RoleBinding.subjects_path) or []
    #### ADD #####
    def add_subject(self, inc_subject):
        ''' add a subject (creates the subjects list when absent); returns True '''
        if self.subjects:
            # pylint: disable=no-member
            self.subjects.append(inc_subject)
        else:
            self.put(RoleBinding.subjects_path, [inc_subject])
        return True
    def add_role_ref(self, inc_role_ref):
        ''' add a role_ref; no-op (returns False) when one already exists

        Note: the string ``inc_role_ref`` is wrapped as {"name": ...}.
        '''
        if not self.role_ref:
            self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
            return True
        return False
    def add_group_names(self, inc_group_names):
        ''' add a group name (creates the list when absent); returns True '''
        if self.group_names:
            # pylint: disable=no-member
            self.group_names.append(inc_group_names)
        else:
            self.put(RoleBinding.group_names_path, [inc_group_names])
        return True
    def add_user_name(self, inc_user_name):
        ''' add a username (creates the list when absent); returns True '''
        if self.user_names:
            # pylint: disable=no-member
            self.user_names.append(inc_user_name)
        else:
            self.put(RoleBinding.user_names_path, [inc_user_name])
        return True
    #### /ADD #####
    #### Remove #####
    def remove_subject(self, inc_subject):
        ''' remove a subject; returns False when not present '''
        try:
            # pylint: disable=no-member
            self.subjects.remove(inc_subject)
        except ValueError as _:
            return False
        return True
    def remove_role_ref(self, inc_role_ref):
        ''' remove a role_ref; returns False when the name does not match

        NOTE(review): only the 'name' key is deleted, leaving an otherwise
        intact roleRef dict behind — confirm this is intended.
        '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref:
            del self.role_ref['name']
            return True
        return False
    def remove_group_name(self, inc_group_name):
        ''' remove a groupname; returns False when not present '''
        try:
            # pylint: disable=no-member
            self.group_names.remove(inc_group_name)
        except ValueError as _:
            return False
        return True
    def remove_user_name(self, inc_user_name):
        ''' remove a username; returns False when not present '''
        try:
            # pylint: disable=no-member
            self.user_names.remove(inc_user_name)
        except ValueError as _:
            return False
        return True
    #### /REMOVE #####
    #### UPDATE #####
    def update_subject(self, inc_subject):
        ''' update a subject in place, or add it when absent; returns True '''
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return self.add_subject(inc_subject)
        self.subjects[index] = inc_subject
        return True
    def update_group_name(self, inc_group_name):
        ''' update a groupname in place, or add it when absent; returns True '''
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return self.add_group_names(inc_group_name)
        self.group_names[index] = inc_group_name
        return True
    def update_user_name(self, inc_user_name):
        ''' update a username in place, or add it when absent; returns True '''
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return self.add_user_name(inc_user_name)
        self.user_names[index] = inc_user_name
        return True
    def update_role_ref(self, inc_role_ref):
        ''' set the roleRef name to the string ``inc_role_ref``; returns True '''
        self.role_ref['name'] = inc_role_ref
        return True
    #### /UPDATE #####
    #### FIND ####
    def find_subject(self, inc_subject):
        ''' find a subject; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.subjects.index(inc_subject)
        except ValueError as _:
            return index
        return index
    def find_group_name(self, inc_group_name):
        ''' find a group_name; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.group_names.index(inc_group_name)
        except ValueError as _:
            return index
        return index
    def find_user_name(self, inc_user_name):
        ''' find a user_name; returns its index or None '''
        index = None
        try:
            # pylint: disable=no-member
            index = self.user_names.index(inc_user_name)
        except ValueError as _:
            return index
        return index
    def find_role_ref(self, inc_role_ref):
        ''' find a role_ref; returns the roleRef dict or None

        NOTE(review): here ``inc_role_ref`` is treated as a dict
        (``inc_role_ref['name']``) while add/remove/update treat it as a
        string — confirm which callers rely on which shape.
        '''
        if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
            return self.role_ref
        return None
# -*- -*- -*- End included fragment: lib/rolebinding.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_router.py -*- -*- -*-
class RouterException(Exception):
    '''Raised when router preparation or manipulation fails.'''
class RouterConfig(OpenShiftCLIConfig):
    ''' RouterConfig is a DTO for the router. '''
    def __init__(self, rname, namespace, kubeconfig, router_options):
        # storage is delegated entirely to OpenShiftCLIConfig;
        # router_options is a dict of {option: {'value': ..., 'include': ...}}
        # entries (see Router.run_ansible, which builds that shape)
        super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options)
class Router(OpenShiftCLI):
    ''' Class to wrap the oc command line tools

    Orchestrates the set of OpenShift objects that make up a router
    (deploymentconfig, service, serviceaccount, certificate secret and
    clusterrolebinding) by comparing a dry-run `oc adm router` rendering
    against what is currently on the cluster.
    '''
    def __init__(self,
                 router_config,
                 verbose=False):
        ''' Constructor for OpenshiftOC

           a router consists of 3 or more parts
           - dc/router
           - svc/router
           - sa/router
           - secret/router-certs
           - clusterrolebinding/router-router-role
        '''
        super(Router, self).__init__('default', router_config.kubeconfig, verbose)
        self.config = router_config
        self.verbose = verbose
        # the (kind, name) pairs that together form this router on the cluster
        self.router_parts = [{'kind': 'dc', 'name': self.config.name},
                             {'kind': 'svc', 'name': self.config.name},
                             {'kind': 'sa', 'name': self.config.config_options['service_account']['value']},
                             {'kind': 'secret', 'name': self.config.name + '-certs'},
                             {'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'},
                            ]
        self.__prepared_router = None
        self.dconfig = None
        self.svc = None
        self._secret = None
        self._serviceaccount = None
        self._rolebinding = None
    @property
    def prepared_router(self):
        ''' property for the prepared router: the objects parsed from the
            dry-run `oc adm router ... -o json` call (see _prepare_router),
            computed once and cached '''
        if self.__prepared_router is None:
            results = self._prepare_router()
            if not results or 'returncode' in results and results['returncode'] != 0:
                if 'stderr' in results:
                    raise RouterException('Could not perform router preparation: %s' % results['stderr'])
                raise RouterException('Could not perform router preparation.')
            self.__prepared_router = results
        return self.__prepared_router
    @prepared_router.setter
    def prepared_router(self, obj):
        '''setter for the prepared_router'''
        self.__prepared_router = obj
    @property
    def deploymentconfig(self):
        ''' property deploymentconfig'''
        return self.dconfig
    @deploymentconfig.setter
    def deploymentconfig(self, config):
        ''' setter for property deploymentconfig '''
        self.dconfig = config
    @property
    def service(self):
        ''' property for service '''
        return self.svc
    @service.setter
    def service(self, config):
        ''' setter for property service '''
        self.svc = config
    @property
    def secret(self):
        ''' property secret '''
        return self._secret
    @secret.setter
    def secret(self, config):
        ''' setter for property secret '''
        self._secret = config
    @property
    def serviceaccount(self):
        ''' property for serviceaccount '''
        return self._serviceaccount
    @serviceaccount.setter
    def serviceaccount(self, config):
        ''' setter for property serviceaccount '''
        self._serviceaccount = config
    @property
    def rolebinding(self):
        ''' property rolebinding '''
        return self._rolebinding
    @rolebinding.setter
    def rolebinding(self, config):
        ''' setter for property rolebinding '''
        self._rolebinding = config
    def get_object_by_kind(self, kind):
        '''return the current object kind by name

        Maps a kind string (with common aliases, case-insensitive) to the
        cached cluster object; returns None for unknown kinds.
        '''
        if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
            return self.deploymentconfig
        elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
            return self.service
        elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
            return self.serviceaccount
        elif re.match("secret", kind, flags=re.IGNORECASE):
            return self.secret
        elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
            return self.rolebinding
        return None
    def get(self):
        ''' fetch each router part from the cluster and cache it; returns a
            dict of the wrapped objects (None for parts that were not found) '''
        self.service = None
        self.deploymentconfig = None
        self.serviceaccount = None
        self.secret = None
        self.rolebinding = None
        for part in self.router_parts:
            result = self._get(part['kind'], name=part['name'])
            if result['returncode'] == 0 and part['kind'] == 'dc':
                self.deploymentconfig = DeploymentConfig(result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'svc':
                self.service = Service(content=result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'sa':
                self.serviceaccount = ServiceAccount(content=result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'secret':
                self.secret = Secret(content=result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding':
                self.rolebinding = RoleBinding(content=result['results'][0])
        return {'deploymentconfig': self.deploymentconfig,
                'service': self.service,
                'serviceaccount': self.serviceaccount,
                'secret': self.secret,
                'clusterrolebinding': self.rolebinding,
               }
    def exists(self):
        '''return True when the dc, svc, secret and serviceaccount all exist
           (the clusterrolebinding is deliberately not part of the check)'''
        if self.deploymentconfig and self.service and self.secret and self.serviceaccount:
            return True
        return False
    def delete(self):
        '''delete every router part; returns the aggregate returncode and the
           per-part results'''
        parts = []
        for part in self.router_parts:
            parts.append(self._delete(part['kind'], part['name']))
        rval = 0
        for part in parts:
            # NOTE(review): the 'already exist' tolerance mirrors create();
            # it is unlikely to appear in delete stderr — confirm intent
            if part['returncode'] != 0 and not 'already exist' in part['stderr']:
                rval = part['returncode']
        return {'returncode': rval, 'results': parts}
    def add_modifications(self, deploymentconfig):
        '''modify the deployment config'''
        # We want modifications in the form of edits coming in from the module.
        # Let's apply these here
        edit_results = []
        for edit in self.config.config_options['edits'].get('value', []):
            if edit['action'] == 'put':
                edit_results.append(deploymentconfig.put(edit['key'],
                                                         edit['value']))
            if edit['action'] == 'update':
                edit_results.append(deploymentconfig.update(edit['key'],
                                                            edit['value'],
                                                            edit.get('index', None),
                                                            edit.get('curr_value', None)))
            if edit['action'] == 'append':
                edit_results.append(deploymentconfig.append(edit['key'],
                                                            edit['value']))
        # returns None when edits were attempted but none of them succeeded
        if edit_results and not any([res[0] for res in edit_results]):
            return None
        return deploymentconfig
    # pylint: disable=too-many-branches
    def _prepare_router(self):
        '''prepare router for instantiation

        Runs `oc adm router --dry-run -o json` and wraps each rendered object.
        Returns either the raw failed CLI result or a dict of
        {kind: {'obj': wrapper, 'path': tmpfile, 'update': False}}.
        '''
        # if cacert, key, and cert were passed, combine them into a pem file
        if (self.config.config_options['cacert_file']['value'] and
                self.config.config_options['cert_file']['value'] and
                self.config.config_options['key_file']['value']):
            # NOTE(review): fixed, world-readable tmp path shared across runs;
            # consider tempfile with restrictive permissions
            router_pem = '/tmp/router.pem'
            with open(router_pem, 'w') as rfd:
                rfd.write(open(self.config.config_options['cert_file']['value']).read())
                rfd.write(open(self.config.config_options['key_file']['value']).read())
                if self.config.config_options['cacert_file']['value'] and \
                   os.path.exists(self.config.config_options['cacert_file']['value']):
                    rfd.write(open(self.config.config_options['cacert_file']['value']).read())
            atexit.register(Utils.cleanup, [router_pem])
            self.config.config_options['default_cert']['value'] = router_pem
        elif self.config.config_options['default_cert']['value'] is None:
            # No certificate was passed to us.  do not pass one to oc adm router
            self.config.config_options['default_cert']['include'] = False
        options = self.config.to_option_list(ascommalist='labels')
        cmd = ['router', self.config.name]
        cmd.extend(options)
        cmd.extend(['--dry-run=True', '-o', 'json'])
        results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
        # pylint: disable=maybe-no-member
        if results['returncode'] != 0 or 'items' not in results['results']:
            return results
        oc_objects = {'DeploymentConfig': {'obj': None, 'path': None, 'update': False},
                      'Secret': {'obj': None, 'path': None, 'update': False},
                      'ServiceAccount': {'obj': None, 'path': None, 'update': False},
                      'ClusterRoleBinding': {'obj': None, 'path': None, 'update': False},
                      'Service': {'obj': None, 'path': None, 'update': False},
                     }
        # pylint: disable=invalid-sequence-index
        for res in results['results']['items']:
            if res['kind'] == 'DeploymentConfig':
                oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res)
            elif res['kind'] == 'Service':
                oc_objects['Service']['obj'] = Service(res)
            elif res['kind'] == 'ServiceAccount':
                oc_objects['ServiceAccount']['obj'] = ServiceAccount(res)
            elif res['kind'] == 'Secret':
                oc_objects['Secret']['obj'] = Secret(res)
            elif res['kind'] == 'ClusterRoleBinding':
                oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res)
        # Currently only deploymentconfig needs updating
        # Verify we got a deploymentconfig
        if not oc_objects['DeploymentConfig']['obj']:
            return results
        # add modifications added
        oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj'])
        # serialize each rendered object to a temp file for _create/_replace
        for oc_type, oc_data in oc_objects.items():
            if oc_data['obj'] is not None:
                oc_data['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_data['obj'].yaml_dict)
        return oc_objects
    def create(self):
        '''Create a router

           This includes the different parts:
           - deploymentconfig
           - service
           - serviceaccount
           - secrets
           - clusterrolebinding
        '''
        results = []
        # needs_update() is called for its side effect: it marks entries in
        # self.prepared_router with 'update' = True (the return value is
        # intentionally discarded here)
        self.needs_update()
        # function-level import kept as in the generated source
        import time
        # pylint: disable=maybe-no-member
        for kind, oc_data in self.prepared_router.items():
            if oc_data['obj'] is not None:
                # NOTE(review): 1s pause between object submissions; the
                # reason is not documented — presumably to let the API server
                # settle between creates.  TODO confirm
                time.sleep(1)
                if self.get_object_by_kind(kind) is None:
                    results.append(self._create(oc_data['path']))
                elif oc_data['update']:
                    results.append(self._replace(oc_data['path']))
        rval = 0
        for result in results:
            if result['returncode'] != 0 and not 'already exist' in result['stderr']:
                rval = result['returncode']
        return {'returncode': rval, 'results': results}
    def update(self):
        '''run update for the router.  This performs a replace'''
        results = []
        # pylint: disable=maybe-no-member
        for _, oc_data in self.prepared_router.items():
            if oc_data['update']:
                results.append(self._replace(oc_data['path']))
        rval = 0
        for result in results:
            if result['returncode'] != 0:
                rval = result['returncode']
        return {'returncode': rval, 'results': results}
    # pylint: disable=too-many-return-statements,too-many-branches
    def needs_update(self):
        ''' check to see if we need to update

        Compares each dry-run rendered object against its cluster counterpart,
        setting prepared_router[kind]['update'] = True where they differ.
        Returns True when any part needs updating.
        '''
        # ServiceAccount:
        #   Need to determine changes from the pregenerated ones from the original
        #   Since these are auto generated, we can skip
        skip = ['secrets', 'imagePullSecrets']
        if self.serviceaccount is None or \
                not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
                                          self.serviceaccount.yaml_dict,
                                          skip_keys=skip,
                                          debug=self.verbose):
            self.prepared_router['ServiceAccount']['update'] = True
        # Secret:
        #   See if one was generated from our dry-run and verify it if needed
        # NOTE(review): the ServiceAccount `skip` list is still in effect for
        # this comparison — confirm that is intended
        if self.prepared_router['Secret']['obj']:
            if not self.secret:
                self.prepared_router['Secret']['update'] = True
            if self.secret is None or \
                    not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
                                              self.secret.yaml_dict,
                                              skip_keys=skip,
                                              debug=self.verbose):
                self.prepared_router['Secret']['update'] = True
        # Service:
        #   Fix the ports to have protocol=TCP
        for port in self.prepared_router['Service']['obj'].get('spec.ports'):
            port['protocol'] = 'TCP'
        skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
        if self.service is None or \
                not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
                                          self.service.yaml_dict,
                                          skip_keys=skip,
                                          debug=self.verbose):
            self.prepared_router['Service']['update'] = True
        # DeploymentConfig:
        #   Router needs some exceptions.
        #   We do not want to check the autogenerated password for stats admin
        if self.deploymentconfig is not None:
            if not self.config.config_options['stats_password']['value']:
                # carry the existing STATS_PASSWORD over so the comparison
                # does not flag the randomly generated one
                for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
                            'spec.template.spec.containers[0].env') or []):
                    if env_var['name'] == 'STATS_PASSWORD':
                        env_var['value'] = \
                            self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
                        break
            # dry-run doesn't add the protocol to the ports section.  We will manually do that.
            for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
                            'spec.template.spec.containers[0].ports') or []):
                if not 'protocol' in port:
                    port['protocol'] = 'TCP'
        # These are different when generating
        skip = ['dnsPolicy',
                'terminationGracePeriodSeconds',
                'restartPolicy', 'timeoutSeconds',
                'livenessProbe', 'readinessProbe',
                'terminationMessagePath', 'hostPort',
                'defaultMode',
               ]
        if self.deploymentconfig is None or \
                not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
                                          self.deploymentconfig.yaml_dict,
                                          skip_keys=skip,
                                          debug=self.verbose):
            self.prepared_router['DeploymentConfig']['update'] = True
        # Check if any of the parts need updating, if so, return True
        # else, no need to update
        # pylint: disable=no-member
        return any([self.prepared_router[oc_type]['update'] for oc_type in self.prepared_router.keys()])
    @staticmethod
    def run_ansible(params, check_mode):
        '''run ansible idempotent code

        Translates the ansible module params into a RouterConfig and performs
        the requested state transition (list/absent/present), honoring
        check_mode.
        '''
        rconfig = RouterConfig(params['name'],
                               params['namespace'],
                               params['kubeconfig'],
                               {'default_cert': {'value': params['default_cert'], 'include': True},
                                'cert_file': {'value': params['cert_file'], 'include': False},
                                'key_file': {'value': params['key_file'], 'include': False},
                                'images': {'value': params['images'], 'include': True},
                                'latest_images': {'value': params['latest_images'], 'include': True},
                                'labels': {'value': params['labels'], 'include': True},
                                'ports': {'value': ','.join(params['ports']), 'include': True},
                                'replicas': {'value': params['replicas'], 'include': True},
                                'selector': {'value': params['selector'], 'include': True},
                                'service_account': {'value': params['service_account'], 'include': True},
                                'router_type': {'value': params['router_type'], 'include': False},
                                'host_network': {'value': params['host_network'], 'include': True},
                                'external_host': {'value': params['external_host'], 'include': True},
                                'external_host_vserver': {'value': params['external_host_vserver'],
                                                          'include': True},
                                'external_host_insecure': {'value': params['external_host_insecure'],
                                                           'include': True},
                                'external_host_partition_path': {'value': params['external_host_partition_path'],
                                                                 'include': True},
                                'external_host_username': {'value': params['external_host_username'],
                                                           'include': True},
                                'external_host_password': {'value': params['external_host_password'],
                                                           'include': True},
                                'external_host_private_key': {'value': params['external_host_private_key'],
                                                              'include': True},
                                'expose_metrics': {'value': params['expose_metrics'], 'include': True},
                                'metrics_image': {'value': params['metrics_image'], 'include': True},
                                'stats_user': {'value': params['stats_user'], 'include': True},
                                'stats_password': {'value': params['stats_password'], 'include': True},
                                'stats_port': {'value': params['stats_port'], 'include': True},
                                # extra
                                'cacert_file': {'value': params['cacert_file'], 'include': False},
                                # edits
                                'edits': {'value': params['edits'], 'include': False},
                               })
        state = params['state']
        ocrouter = Router(rconfig, verbose=params['debug'])
        api_rval = ocrouter.get()
        ########
        # get
        ########
        if state == 'list':
            return {'changed': False, 'results': api_rval, 'state': state}
        ########
        # Delete
        ########
        if state == 'absent':
            if not ocrouter.exists():
                return {'changed': False, 'state': state}
            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
            # In case of delete we return a list of each object
            # that represents a router and its result in a list
            # pylint: disable=redefined-variable-type
            api_rval = ocrouter.delete()
            return {'changed': True, 'results': api_rval, 'state': state}
        if state == 'present':
            ########
            # Create
            ########
            if not ocrouter.exists():
                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
                api_rval = ocrouter.create()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                return {'changed': True, 'results': api_rval, 'state': state}
            ########
            # Update
            ########
            if not ocrouter.needs_update():
                return {'changed': False, 'state': state}
            if check_mode:
                return {'changed': False, 'msg': 'CHECK_MODE: Would have performed an update.'}
            api_rval = ocrouter.update()
            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}
            return {'changed': True, 'results': api_rval, 'state': state}
# -*- -*- -*- End included fragment: class/oc_adm_router.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_router.py -*- -*- -*-
def main():
    '''
    Ansible entry point for the oc_adm_router module.

    Builds the AnsibleModule argument spec, delegates all real work to
    Router.run_ansible() and maps its result dict onto
    module.exit_json()/module.fail_json().
    '''
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str',
                       choices=['present', 'absent']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            name=dict(default='router', type='str'),
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            # TLS material for the router's default certificate.
            default_cert=dict(default=None, type='str'),
            cert_file=dict(default=None, type='str'),
            key_file=dict(default=None, type='str'),
            images=dict(default=None, type='str'),  # 'openshift3/ose-${component}:${version}'
            latest_images=dict(default=False, type='bool'),
            labels=dict(default=None, type='dict'),
            ports=dict(default=['80:80', '443:443'], type='list'),
            replicas=dict(default=1, type='int'),
            selector=dict(default=None, type='str'),
            service_account=dict(default='router', type='str'),
            router_type=dict(default='haproxy-router', type='str'),
            host_network=dict(default=True, type='bool'),
            # external host options (external load-balancer integration)
            external_host=dict(default=None, type='str'),
            external_host_vserver=dict(default=None, type='str'),
            external_host_insecure=dict(default=False, type='bool'),
            external_host_partition_path=dict(default=None, type='str'),
            external_host_username=dict(default=None, type='str'),
            external_host_password=dict(default=None, type='str', no_log=True),
            external_host_private_key=dict(default=None, type='str', no_log=True),
            # Metrics
            expose_metrics=dict(default=False, type='bool'),
            metrics_image=dict(default=None, type='str'),
            # Stats (haproxy statistics endpoint credentials/port)
            stats_user=dict(default=None, type='str'),
            stats_password=dict(default=None, type='str', no_log=True),
            stats_port=dict(default=1936, type='int'),
            # extra
            cacert_file=dict(default=None, type='str'),
            # edits (post-creation object patches applied by the Router class)
            edits=dict(default=[], type='list'),
        ),
        # A custom image set and TLS-file combinations conflict with the
        # prebuilt defaults, hence the exclusions below.
        mutually_exclusive=[["router_type", "images"],
                            ["key_file", "default_cert"],
                            ["cert_file", "default_cert"],
                            ["cacert_file", "default_cert"],
                            ],
        required_together=[['cacert_file', 'cert_file', 'key_file']],
        supports_check_mode=True,
    )

    results = Router.run_ansible(module.params, module.check_mode)

    if 'failed' in results:
        module.fail_json(**results)

    module.exit_json(**results)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_adm_router.py -*- -*- -*-
|
zhiwliu/openshift-ansible
|
roles/lib_openshift/library/oc_adm_router.py
|
Python
|
apache-2.0
| 109,527
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Display names of Neutron API extensions referenced by the neutron network
# plugin. NOTE(review): presumably matched against the names reported by
# Neutron's extension listing — confirm against the callers of this module.
PROVIDER_NW_EXT = 'Provider Network'
PORTBINDING_EXT = 'Port Binding'
|
jcsp/manila
|
manila/network/neutron/constants.py
|
Python
|
apache-2.0
| 705
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Subversion(AutotoolsPackage):
    """Apache Subversion - an open source version control system."""

    homepage = 'https://subversion.apache.org/'
    # Older releases move to archive.apache.org; list both mirrors so every
    # declared version stays fetchable.
    urls = [
        'https://archive.apache.org/dist/subversion/subversion-1.12.2.tar.gz',
        'https://downloads.apache.org/subversion/subversion-1.13.0.tar.gz'
    ]

    version('1.14.1', sha256='dee2796abaa1f5351e6cc2a60b1917beb8238af548b20d3e1ec22760ab2f0cad')
    version('1.14.0', sha256='ef3d1147535e41874c304fb5b9ea32745fbf5d7faecf2ce21d4115b567e937d0')
    version('1.13.0', sha256='daad440c03b8a86fcca804ea82217bb1902cfcae1b7d28c624143c58dcb96931')
    version('1.12.2', sha256='f4927d6603d96c5ddabebbafe9a0f6833c18a891ff0ce1ea6ffd186ce9bc21f3')
    version('1.9.7', sha256='c72a209c883e20245f14c4e644803f50ae83ae24652e385ff5e82300a0d06c3c')
    version('1.9.6', sha256='a400cbc46d05cb29f2d7806405bb539e9e045b24013b0f12f8f82688513321a7')
    version('1.9.5', sha256='280ba586c5d51d7b976b65d22d5e8e42f3908ed1c968d71120dcf534ce857a83')
    version('1.9.3', sha256='74cd21d2f8a2a54e4dbd2389fe1605a19dbda8ba88ffc4bb0edc9a66e143cc93')
    version('1.8.17', sha256='1b2cb9a0ca454035e55b114ee91c6433b9ede6c2893f2fb140939094d33919e4')
    version('1.8.13', sha256='17e8900a877ac9f0d5ef437c20df437fec4eb2c5cb9882609d2277e2312da52c')

    variant('serf', default=True, description='Serf HTTP client library')
    variant('perl', default=False, description='Build with Perl bindings')

    depends_on('apr')
    depends_on('apr-util')
    depends_on('zlib')
    depends_on('sqlite@3.8.2:')
    depends_on('expat')
    # lz4/utf8proc became build requirements in the 1.10 series.
    depends_on('lz4', when='@1.10:')
    depends_on('utf8proc', when='@1.10:')
    depends_on('serf', when='+serf')

    extends('perl', when='+perl')
    depends_on('swig@1.3.24:3.0.0', when='+perl')
    depends_on('perl-termreadkey', when='+perl')

    # https://www.linuxfromscratch.org/blfs/view/svn/general/subversion.html
    def configure_args(self):
        """Assemble the ./configure flag list from the active spec."""
        spec = self.spec
        args = [
            '--with-apr={0}'.format(spec['apr'].prefix),
            '--with-apr-util={0}'.format(spec['apr-util'].prefix),
            '--with-sqlite={0}'.format(spec['sqlite'].prefix),
            # expat takes "includedir:libdir:libname", not a plain prefix.
            '--with-expat={0}:{1}:{2}'.format(
                spec['expat'].headers.directories[0],
                spec['expat'].libs.directories[0],
                spec['expat'].libs.names[0]
            ),
            '--with-zlib={0}'.format(spec['zlib'].prefix),
            '--without-apxs',
            '--without-trang',
            '--without-doxygen',
            '--without-berkeley-db',
            '--without-sasl',
            '--without-libmagic',
            '--without-kwallet',
            '--without-jdk',
            '--without-boost',
        ]
        if spec.satisfies('@1.10:'):
            args.extend([
                '--with-lz4={0}'.format(spec['lz4'].prefix),
                '--with-utf8proc={0}'.format(spec['utf8proc'].prefix),
            ])
        if '+serf' in spec:
            args.append('--with-serf={0}'.format(spec['serf'].prefix))
        else:
            args.append('--without-serf')
        if 'swig' in spec:
            args.append('--with-swig={0}'.format(spec['swig'].prefix))
        else:
            args.append('--without-swig')
        if '+perl' in spec:
            args.append('PERL={0}'.format(spec['perl'].command.path))
        return args

    def build(self, spec, prefix):
        """Build subversion, plus the Perl SWIG bindings when requested."""
        make()
        if '+perl' in spec:
            make('swig-pl')
            with working_dir(join_path(
                    'subversion', 'bindings', 'swig', 'perl', 'native')):
                perl = spec['perl'].command
                perl('Makefile.PL', 'INSTALL_BASE={0}'.format(prefix))

    def check(self):
        """Run the test suite (and the Perl-binding tests if built)."""
        make('check')
        if '+perl' in self.spec:
            make('check-swig-pl')

    def install(self, spec, prefix):
        """Install subversion; serial install avoids make-race issues."""
        make('install', parallel=False)
        if '+perl' in spec:
            make('install-swig-pl-lib')
            with working_dir(join_path(
                    'subversion', 'bindings', 'swig', 'perl', 'native')):
                make('install')
|
LLNL/spack
|
var/spack/repos/builtin/packages/subversion/package.py
|
Python
|
lgpl-2.1
| 4,334
|
from django import forms
from account.forms import SettingsForm as AccountSettingsForm
from .models import ADOPTION_LEVEL_CHOICES
# HTML help text rendered next to the adoption_level field in SettingsForm.
# Do not strip the markup: it is passed straight through to the template.
ADOPTION_LEVEL_HELP_TEXT = """
<b>Adoption level</b> determines when a new survey or game will be made available to you.
<small>
If you want access as soon as it’s launched, select <b>Bleeding Edge</b>.
If you want to wait until <i>at least 10 other people</i> have tried it, select <b>Early Adopter</b>.
If you want to wait until <i>at least 100 other people</i> have tried it, select <b>Mainstream</b>.
</small>
"""
class SettingsForm(AccountSettingsForm):
    """Account settings form extended with the user's adoption-level choice."""
    adoption_level = forms.ChoiceField(choices=ADOPTION_LEVEL_CHOICES, help_text=ADOPTION_LEVEL_HELP_TEXT)
|
jtauber/learning-greek
|
learning_greek/forms.py
|
Python
|
mit
| 709
|
import threading
from coiot.device_action_list import DeviceActionList, DALDevice
from ble.device import CompositeBleDevice, drivers as ble_drivers
from gi.repository import GLib
from . import db_interface
import logging
import time
log = logging.getLogger('BLE')
db_interface.BLEDriverParameters.register()
class BluezBLEDriver(threading.Thread):
    """Background thread bridging Bluez BLE devices and the CoIoT core.

    Polls the Bluez adapter for connected devices, probes them with the
    registered BLE drivers, keeps the cached devices' Online flag in sync,
    and applies attribute writes queued through the proxies returned by
    register().
    """

    def __init__(self, adapter, updates, autostart=True, drivers=ble_drivers):
        """Set up the driver and (by default) start its polling thread.

        adapter:   Bluez adapter wrapper exposing .devices and .proxy.
        updates:   DeviceActionList-like sink notified of confirmed changes.
        autostart: start() the thread immediately when True.
        drivers:   iterable of driver classes with a probe(device) method.
        """
        super().__init__()
        self.adapter = adapter
        self.drivers = drivers
        # Writes queued by callers through the proxies handed out by register().
        self.action_list = DeviceActionList()
        self.updates = updates
        # Mac -> {Idx -> DALDevice}: database-backed devices seen via register().
        self.cache = {}
        # Mac -> {Idx -> CompositeBleDevice}: currently connected BLE devices.
        self.ble_devices = {}
        db_interface.BLEDriverParameters.register_driver(self)
        self.stopped = False
        self.adapter.proxy.Powered = True
        if autostart:
            self.start()

    def stop(self):
        """Ask the run() loop to exit on its next iteration."""
        self.stopped = True

    def refresh_devices(self):
        """Synchronize self.ble_devices with the adapter's connected devices."""
        for a, d in self.adapter.devices.items():
            try:
                if a in self.ble_devices:
                    if not d.proxy.Connected:
                        # Device disconnected: drop it so it gets re-probed
                        # on its next connection.
                        del self.ble_devices[a]
                    else:
                        continue
                if not d.proxy.Connected:
                    continue
                # Newly connected device: let every driver contribute the
                # sub-devices it recognizes.
                for driver in self.drivers:
                    driver_devices = driver.probe(d)
                    for i, v in driver_devices.items():
                        da = self.ble_devices.setdefault(a, {})
                        da.setdefault(i, CompositeBleDevice()).extend(v)
            except GLib.Error as e:
                # Only swallow bluez D-Bus errors; anything else is a real bug.
                epart = e.message.split(':')
                if epart[0] != "GDBus.Error":
                    raise
                if not epart[1].startswith("org.bluez.Error"):
                    raise
                emsg = ':'.join(epart[1:])
                log.error("{}: {}".format(d, emsg))

    def run(self):
        """Main loop: track connectivity and apply queued attribute writes."""
        while not self.stopped:
            dkprev = set(self.ble_devices.keys())
            self.refresh_devices()
            # Refresh the Online flag of every cached device that appeared or
            # vanished in this pass. A device can be connected without having
            # been register()'ed yet, so fall back to an empty dict instead of
            # raising KeyError (bug in the original: self.cache[a]).
            for a in set(self.ble_devices.keys()).union(dkprev):
                for cd in self.cache.get(a, {}).values():
                    cd.Online = (a in self.ble_devices.keys())
            if not self.ble_devices:
                time.sleep(1)
                continue
            t = self.action_list.pop()
            if t is not None:
                d, k, v = t
                ble_dev = self.ble_devices[d.Mac][d.Idx]
                setattr(ble_dev, k, v)
                # Mirror the applied write onto the cached DALDevice so the
                # updates sink is notified. The original referenced the
                # non-existent attribute ``self.devices`` here, which raised
                # AttributeError on the first queued write.
                setattr(self.cache[d.Mac][d.Idx], k, v)
                log.info("update {}[{}] {} = {}".format(d.Mac, d.Idx, k, v))

    def register(self, cache):
        """Register a database-backed device.

        Stores an updates-facing proxy in self.cache and returns a second
        proxy whose writes are queued on self.action_list for run() to apply.
        """
        da = self.cache.setdefault(cache.Mac, {})
        da[cache.Idx] = DALDevice(cache, self.updates)
        return DALDevice(cache, self.action_list)

    def __str__(self):
        return type(self).__name__
|
coiot-ble/coiotd
|
ble/driver.py
|
Python
|
apache-2.0
| 2,852
|
# -*- coding: utf-8 -*-
"""Extension management."""
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.utils.encoding import smart_str
class ModoExtension(object):
    """Base class for Modoboa extension plugins.

    Every extension must subclass this to be considered valid; subclasses
    override the class attributes and the load hooks as needed.
    """

    # Declarative metadata overridden by concrete extensions.
    name = None
    label = None
    version = "NA"
    description = ""
    needs_media = False
    always_active = False
    url = None
    topredirection_url = None

    def get_url(self):
        """Return the base url of this extension (falls back to its name)."""
        return self.name if self.url is None else self.url

    def infos(self):
        """Return a metadata dictionary describing this extension."""
        return {
            "name": self.name,
            "label": self.label,
            "version": self.version,
            "description": self.description,
            "url": self.get_url(),
            "topredirection_url": self.topredirection_url,
            "always_active": self.always_active,
        }

    def load_initial_data(self):
        """Hook: subclasses declare their initial data here."""
        pass

    def load(self):
        """Hook: subclasses perform their loading tasks here."""
        pass
class ExtensionsPool(object):
    """Registry of every known Modoboa extension.

    Maps extension names to {"cls": ..., "show": ...} entries; instances are
    created lazily on first access and then reused.
    """

    def __init__(self):
        self.extensions = {}

    def register_extension(self, ext, show=True):
        """Register an extension class.

        :param ext: a class inheriting from ``Extension``
        :param show: list the extension or not
        """
        self.extensions[ext.name] = {"cls": ext, "show": show}

    def get_extension(self, name):
        """Return the (lazily created) instance of the named extension."""
        entry = self.extensions.get(name)
        if entry is None:
            return None
        if "instance" not in entry:
            entry["instance"] = entry["cls"]()
        return entry["instance"]

    def get_extension_infos(self, name):
        """Return the metadata dict of the named extension, or None."""
        instance = self.get_extension(name)
        return None if instance is None else instance.infos()

    def load_extension(self, name):
        """Import the extension module and run its load() hook."""
        __import__(name, locals(), globals(), [smart_str("modo_extension")])
        instance = self.get_extension(name)
        if instance is None:
            return None
        instance.load()
        return instance

    def load_all(self):
        """Load every extension listed in settings.MODOBOA_APPS.

        Each extension must be loaded in order to integrate with Modoboa.
        Urls for all of them are always returned because Django imports url
        maps only once at process start; missing entries would otherwise
        surface as unexpected 404 errors.
        """
        for ext in settings.MODOBOA_APPS:
            self.load_extension(ext)

    def get_urls(self, category="app"):
        """Collect the url patterns declared by every registered extension."""
        urlpatterns = []
        for ext_name in list(self.extensions.keys()):
            ext = self.get_extension(ext_name)
            kwargs = {}
            if category == "api":
                root = ""
                pattern = "{}.urls_api"
            else:
                root = r"^{}/".format(ext.get_url())
                kwargs["namespace"] = ext_name
                pattern = "{}.urls"
            try:
                urlpatterns.append(
                    url(root, include(pattern.format(ext_name), **kwargs)))
            except ImportError:
                # Extension without urls: nothing to mount.
                continue
        return urlpatterns

    def list_all(self):
        """Return infos of all visible extensions, sorted by name."""
        visible = []
        for extname, extdef in list(self.extensions.items()):
            if not extdef["show"]:
                continue
            infos = self.get_extension_infos(extname)
            infos["id"] = extname
            visible.append(infos)
        return sorted(visible, key=lambda i: i["name"])
# Process-wide extension registry shared by the rest of the application.
exts_pool = ExtensionsPool()
|
tonioo/modoboa
|
modoboa/core/extensions.py
|
Python
|
isc
| 4,212
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Private version of MixMatch: we only changed the datasets.
"""
import os
from absl import app
from absl import flags
from libml import utils
from mixmatch import MixMatch
from privacy.lib.data_pair import DATASETS
FLAGS = flags.FLAGS
def main(argv):
    """Instantiate the selected dataset and train a MixMatch model on it."""
    del argv  # Unused.
    # NOTE(review): this script assumes --nu=2 (two unlabeled augmentations
    # per example) — confirm against the MixMatch implementation.
    assert FLAGS.nu == 2
    dataset = DATASETS[FLAGS.dataset]()
    log_width = utils.ilog2(dataset.width)
    model = MixMatch(
        os.path.join(FLAGS.train_dir, dataset.name),
        dataset,
        lr=FLAGS.lr,
        wd=FLAGS.wd,
        arch=FLAGS.arch,
        batch=FLAGS.batch,
        nclass=dataset.nclass,
        ema=FLAGS.ema,
        beta=FLAGS.beta,
        w_match=FLAGS.w_match,
        # Default classifier depth is derived from the image size.
        scales=FLAGS.scales or (log_width - 2),
        filters=FLAGS.filters,
        repeat=FLAGS.repeat)
    # train_kimg/report_kimg are in units of 1024 images; << 10 converts
    # them to raw image counts.
    model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)


if __name__ == '__main__':
    utils.setup_tf()
    # Flags specific to this MixMatch variant; shared flags (dataset, batch,
    # lr, ...) are defined elsewhere and only get new defaults below.
    flags.DEFINE_float('wd', 0.04, 'Weight decay.')
    flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
    flags.DEFINE_float('beta', 0.75, 'Mixup beta distribution.')
    flags.DEFINE_float('w_match', 75, 'Weight for distribution matching loss.')
    flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
    flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
    flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
    FLAGS.set_default('dataset', 'cifar10')
    FLAGS.set_default('batch', 64)
    FLAGS.set_default('lr', 0.002)
    FLAGS.set_default('train_kimg', 1 << 13)
    app.run(main)
|
google-research/mixmatch
|
privacy/pr_mixmatch.py
|
Python
|
apache-2.0
| 2,175
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Search Architecture:
- Have a list of accounts
- Create an "overseer" thread
- Search Overseer:
- Tracks incoming new location values
- Tracks "paused state"
- During pause or new location will clears current search queue
- Starts search_worker threads
- Search Worker Threads each:
- Have a unique API login
- Listens to the same Queue for areas to scan
- Can re-login as needed
- Pushes finds to db queue and webhook queue
'''
import logging
import math
import json
import os
import random
import time
import geopy
import geopy.distance
from datetime import datetime
from operator import itemgetter
from threading import Thread
from queue import Queue, Empty
from pgoapi import PGoApi
from pgoapi.utilities import f2i
from pgoapi import utilities as util
from pgoapi.exceptions import AuthException
from .models import parse_map, Pokemon, hex_bounds, GymDetails, parse_gyms, MainWorker, WorkerStatus
from .transform import generate_location_steps
from .fakePogoApi import FakePogoApi
from .utils import now
import terminalsize
log = logging.getLogger(__name__)
TIMESTAMP = '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
# Apply a location jitter
def jitterLocation(location=None, maxMeters=10):
    """Return (lat, lng, alt) randomly displaced by up to maxMeters.

    The sqrt on the radius gives a uniform distribution over the disc
    rather than clustering points near the centre.
    """
    start = geopy.Point(location[0], location[1])
    bearing = random.randint(0, 360)
    km = math.sqrt(random.random()) * (float(maxMeters) / 1000)
    moved = geopy.distance.distance(kilometers=km).destination(start, bearing)
    return (moved.latitude, moved.longitude, location[2])
# gets the current time past the hour
def cur_sec():
    """Return seconds elapsed since the top of the current UTC hour (0-3599)."""
    t = time.gmtime()
    return t.tm_min * 60 + t.tm_sec
# Thread to handle user input
def switch_status_printer(display_type, current_page):
    """Console-input thread: flips the shared display mode and page.

    Communicates with status_printer() through the shared single-element
    lists display_type and current_page. Runs forever.
    """
    # Get a reference to the root logger
    mainlog = logging.getLogger()
    # Disable logging of the first handler - the stream handler, and disable it's output
    mainlog.handlers[0].setLevel(logging.CRITICAL)
    while True:
        # Wait for the user to press a key
        # NOTE(review): raw_input is Python-2-only; the module mixes py2 and
        # py3 constructs (see the py3-style `queue` import) — confirm the
        # intended interpreter before porting.
        command = raw_input()
        if command == '':
            # Switch between logging and display.
            if display_type[0] != 'logs':
                # Disable display, enable on screen logging
                mainlog.handlers[0].setLevel(logging.DEBUG)
                display_type[0] = 'logs'
                # If logs are going slowly, sometimes it's hard to tell you switched. Make it clear.
                print 'Showing logs...'
            elif display_type[0] == 'logs':
                # Enable display, disable on screen logging (except for critical messages)
                mainlog.handlers[0].setLevel(logging.CRITICAL)
                display_type[0] = 'workers'
        elif command.isdigit():
            # A bare number jumps straight to that worker page.
            current_page[0] = int(command)
            mainlog.handlers[0].setLevel(logging.CRITICAL)
            display_type[0] = 'workers'
        elif command.lower() == 'f':
            # 'f' shows the accounts currently on hold.
            mainlog.handlers[0].setLevel(logging.CRITICAL)
            display_type[0] = 'failedaccounts'
# Thread to print out the status of each worker
def status_printer(threadStatus, search_items_queue, db_updates_queue, wh_queue, account_queue, account_failures):
    """Render a live, paginated status screen once per second. Runs forever."""
    display_type = ["workers"]
    current_page = [1]
    # Start another thread to get user input
    t = Thread(target=switch_status_printer,
               name='switch_status_printer',
               args=(display_type, current_page))
    t.daemon = True
    t.start()
    while True:
        time.sleep(1)
        if display_type[0] == 'logs':
            # In log display mode, we don't want to show anything
            continue
        # Create a list to hold all the status lines, so they can be printed all at once to reduce flicker
        status_text = []
        if display_type[0] == 'workers':
            # Get the terminal size
            width, height = terminalsize.get_terminal_size()
            # Queue and overseer take 2 lines. Switch message takes up 2 lines. Remove an extra 2 for things like screen status lines.
            usable_height = height - 6
            # Prevent people running terminals only 6 lines high from getting a divide by zero
            if usable_height < 1:
                usable_height = 1
            # Calculate total skipped items
            skip_total = 0
            for item in threadStatus:
                if 'skip' in threadStatus[item]:
                    skip_total += threadStatus[item]['skip']
            # Print the queue length
            status_text.append('Queues: {} search items, {} db updates, {} webhook. Total skipped items: {}. Spare accounts available: {}. Accounts on hold: {}'.format(search_items_queue.qsize(), db_updates_queue.qsize(), wh_queue.qsize(), skip_total, account_queue.qsize(), len(account_failures)))
            # Print status of overseer
            status_text.append('{} Overseer: {}'.format(threadStatus['Overseer']['method'], threadStatus['Overseer']['message']))
            # Calculate the total number of pages. Subtracting 1 for the overseer.
            total_pages = math.ceil((len(threadStatus) - 1) / float(usable_height))
            # Prevent moving outside the valid range of pages
            if current_page[0] > total_pages:
                current_page[0] = total_pages
            if current_page[0] < 1:
                current_page[0] = 1
            # Calculate which lines to print
            start_line = usable_height * (current_page[0] - 1)
            end_line = start_line + usable_height
            current_line = 1
            # Find the longest username and proxy
            userlen = 4
            proxylen = 5
            for item in threadStatus:
                if threadStatus[item]['type'] == 'Worker':
                    userlen = max(userlen, len(threadStatus[item]['user']))
                    if 'proxy_display' in threadStatus[item]:
                        proxylen = max(proxylen, len(str(threadStatus[item]['proxy_display'])))
            # How pretty
            status = '{:10} | {:5} | {:' + str(userlen) + '} | {:' + str(proxylen) + '} | {:7} | {:6} | {:5} | {:7} | {:10}'
            # Print the worker status
            status_text.append(status.format('Worker ID', 'Start', 'User', 'Proxy', 'Success', 'Failed', 'Empty', 'Skipped', 'Message'))
            for item in sorted(threadStatus):
                if(threadStatus[item]['type'] == 'Worker'):
                    current_line += 1
                    # Skip over items that don't belong on this page
                    if current_line < start_line:
                        continue
                    if current_line > end_line:
                        break
                    status_text.append(status.format(item, time.strftime('%H:%M', time.localtime(threadStatus[item]['starttime'])), threadStatus[item]['user'], threadStatus[item]['proxy_display'], threadStatus[item]['success'], threadStatus[item]['fail'], threadStatus[item]['noitems'], threadStatus[item]['skip'], threadStatus[item]['message']))
        elif display_type[0] == 'failedaccounts':
            status_text.append('-----------------------------------------')
            status_text.append('Accounts on hold:')
            status_text.append('-----------------------------------------')
            # Find the longest account name
            userlen = 4
            for account in account_failures:
                userlen = max(userlen, len(account['account']['username']))
            status = '{:' + str(userlen) + '} | {:10} | {:20}'
            status_text.append(status.format('User', 'Hold Time', 'Reason'))
            for account in account_failures:
                status_text.append(status.format(account['account']['username'], time.strftime('%H:%M:%S', time.localtime(account['last_fail_time'])), account['reason']))
        # Print the status_text for the current screen
        # NOTE(review): total_pages is only assigned on the 'workers' branch;
        # the first pass always takes that branch, after which the stale value
        # is reused on the failedaccounts screen — confirm this is intended.
        status_text.append('Page {}/{}. Page number to switch pages. F to show on hold accounts. <ENTER> alone to switch between status and log view'.format(current_page[0], total_pages))
        # Clear the screen
        os.system('cls' if os.name == 'nt' else 'clear')
        # Print status
        print "\n".join(status_text)
# The account recycler monitors failed accounts and places them back in the account queue 2 hours after they failed.
# This allows accounts that were soft banned to be retried after giving them a chance to cool down.
def account_recycler(accounts_queue, account_failures, args):
    """Background thread: return rested failed accounts to the work queue.

    Once a minute, every entry in account_failures whose last failure is
    older than args.account_rest_interval seconds is moved back onto
    accounts_queue; the rest are logged with their remaining cooldown.
    """
    while True:
        # Run once a minute
        time.sleep(60)
        log.info('Account recycler running. Checking status of {} accounts'.format(len(account_failures)))
        # Anything that failed at or before this timestamp has rested enough.
        ok_time = now() - args.account_rest_interval
        # Iterate over a snapshot so removals don't disturb the walk.
        for entry in list(account_failures):
            if entry['last_fail_time'] <= ok_time:
                log.info('Account {} returning to active duty.'.format(entry['account']['username']))
                account_failures.remove(entry)
                accounts_queue.put(entry['account'])
            else:
                log.info('Account {} needs to cool off for {} seconds due to {}'.format(entry['account']['username'], entry['last_fail_time'] - ok_time, entry['reason']))
def worker_status_db_thread(threads_status, name, db_updates_queue):
    """Forever-loop: mirror the in-memory thread status into the database.

    Every 3 seconds, snapshots the overseer and per-worker entries from
    threads_status and pushes them onto db_updates_queue as (Model, rows)
    pairs for the database writer to persist under the given instance name.
    """
    log.info("Clearing previous statuses for '%s' worker", name)
    # Drop stale rows left over from a previous run under the same name.
    WorkerStatus.delete().where(WorkerStatus.worker_name == name).execute()
    while True:
        workers = {}
        overseer = None
        for status in threads_status.values():
            if status['type'] == 'Overseer':
                overseer = {
                    'worker_name': name,
                    'message': status['message'],
                    'method': status['method'],
                    'last_modified': datetime.utcnow()
                }
            if status['type'] == 'Worker':
                workers[status['user']] = {
                    'username': status['user'],
                    'worker_name': name,
                    'success': status['success'],
                    'fail': status['fail'],
                    'no_items': status['noitems'],
                    'skip': status['skip'],
                    'last_modified': datetime.utcnow(),
                    'message': status['message']
                }
        if overseer is not None:
            db_updates_queue.put((MainWorker, {0: overseer}))
        db_updates_queue.put((WorkerStatus, workers))
        time.sleep(3)
# The main search loop that keeps an eye on the over all process
def search_overseer_thread(args, method, new_location_queue, pause_bit, encryption_lib_path, db_updates_queue, wh_queue):
    """Top-level scan scheduler.

    Spawns the status printer, account recycler, status-DB and worker
    threads, then loops forever keeping search_items_queue filled with
    scan steps, honouring pause_bit and location changes.
    """
    log.info('Search overseer starting')
    search_items_queue = Queue()
    account_queue = Queue()
    threadStatus = {}
    '''
    Create a queue of accounts for workers to pull from. When a worker has failed too many times,
    it can get a new account from the queue and reinitialize the API. Workers should return accounts
    to the queue so they can be tried again later, but must wait a bit before doing do so to
    prevent accounts from being cycled through too quickly.
    '''
    for i, account in enumerate(args.accounts):
        account_queue.put(account)
    # Create a list for failed accounts
    account_failures = []
    threadStatus['Overseer'] = {
        'message': 'Initializing',
        'type': 'Overseer',
        'method': 'Hex Grid' if method == 'hex' else 'Spawn Point'
    }
    if(args.print_status):
        log.info('Starting status printer thread')
        t = Thread(target=status_printer,
                   name='status_printer',
                   args=(threadStatus, search_items_queue, db_updates_queue, wh_queue, account_queue, account_failures))
        t.daemon = True
        t.start()
    # Create account recycler thread
    log.info('Starting account recycler thread')
    t = Thread(target=account_recycler, name='account-recycler', args=(account_queue, account_failures, args))
    t.daemon = True
    t.start()
    if args.status_name is not None:
        log.info('Starting status database thread')
        t = Thread(target=worker_status_db_thread,
                   name='status_worker_db',
                   args=(threadStatus, args.status_name, db_updates_queue))
        t.daemon = True
        t.start()
    # Create specified number of search_worker_thread
    log.info('Starting search worker threads')
    for i in range(0, args.workers):
        log.debug('Starting search worker thread %d', i)
        # Set proxy for each worker, using round robin
        proxy_display = 'No'
        proxy_url = False
        if args.proxy:
            proxy_display = proxy_url = args.proxy[i % len(args.proxy)]
            if args.proxy_display.upper() != 'FULL':
                # Show just the rotation index instead of the full proxy url.
                proxy_display = i % len(args.proxy)
        workerId = 'Worker {:03}'.format(i)
        threadStatus[workerId] = {
            'type': 'Worker',
            'message': 'Creating thread...',
            'success': 0,
            'fail': 0,
            'noitems': 0,
            'skip': 0,
            'user': '',
            'proxy_display': proxy_display,
            'proxy_url': proxy_url,
        }
        t = Thread(target=search_worker_thread,
                   name='search-worker-{}'.format(i),
                   args=(args, account_queue, account_failures, search_items_queue, pause_bit,
                         encryption_lib_path, threadStatus[workerId],
                         db_updates_queue, wh_queue))
        t.daemon = True
        t.start()
    '''
    For hex scanning, we can generate the full list of scan points well
    in advance. When then can queue them all up to be searched as fast
    as the threads will allow.
    With spawn point scanning (sps) we can come up with the order early
    on, and we can populate the entire queue, but the individual threads
    will need to wait until the point is available (and ensure it is not
    to late as well).
    '''
    # A place to track the current location
    current_location = False
    # Used to tell SPS to scan for all CURRENT pokemon instead
    # of, like during a normal loop, just finding the next one
    # which will appear (since you've already scanned existing
    # locations in the prior loop)
    # Needed in a first loop and pausing/changing location.
    sps_scan_current = True
    # The real work starts here but will halt on pause_bit.set()
    while True:
        # paused; clear queue if needed, otherwise sleep and loop
        while pause_bit.is_set():
            if not search_items_queue.empty():
                try:
                    while True:
                        search_items_queue.get_nowait()
                except Empty:
                    pass
            threadStatus['Overseer']['message'] = 'Scanning is paused'
            sps_scan_current = True
            time.sleep(1)
        # If a new location has been passed to us, get the most recent one
        if not new_location_queue.empty():
            log.info('New location caught, moving search grid')
            sps_scan_current = True
            try:
                while True:
                    current_location = new_location_queue.get_nowait()
            except Empty:
                pass
            # We (may) need to clear the search_items_queue
            if not search_items_queue.empty():
                try:
                    while True:
                        search_items_queue.get_nowait()
                except Empty:
                    pass
        # If there are no search_items_queue either the loop has finished (or been
        # cleared above) -- either way, time to fill it back up
        if search_items_queue.empty():
            log.debug('Search queue empty, restarting loop')
            # locations = [((lat, lng, alt), ts_appears, ts_leaves),...]
            if method == 'hex':
                locations = get_hex_location_list(args, current_location)
            else:
                locations = get_sps_location_list(args, current_location, sps_scan_current)
                sps_scan_current = False
            if len(locations) == 0:
                log.warning('Nothing to scan!')
            threadStatus['Overseer']['message'] = 'Queuing steps'
            for step, step_location in enumerate(locations, 1):
                log.debug('Queueing step %d @ %f/%f/%f', step, step_location[0][0], step_location[0][1], step_location[0][2])
                search_args = (step, step_location[0], step_location[1], step_location[2])
                search_items_queue.put(search_args)
        else:
            nextitem = search_items_queue.queue[0]
            threadStatus['Overseer']['message'] = 'Processing search queue, next item is {:6f},{:6f}'.format(nextitem[1][0], nextitem[1][1])
            # If times are specified, print the time of the next queue item, and how many seconds ahead/behind realtime
            if nextitem[2]:
                threadStatus['Overseer']['message'] += ' @ {}'.format(time.strftime('%H:%M:%S', time.localtime(nextitem[2])))
                if nextitem[2] > now():
                    threadStatus['Overseer']['message'] += ' ({}s ahead)'.format(nextitem[2] - now())
                else:
                    threadStatus['Overseer']['message'] += ' ({}s behind)'.format(now() - nextitem[2])
        # Now we just give a little pause here
        time.sleep(1)
def get_hex_location_list(args, current_location):
    """Build the hex-grid scan list as [((lat, lng, 0), 0, 0), ...].

    Timestamps are zeroed because hex steps carry no appear/leave times.
    """
    # Pokestop/gym-only scans can use the much larger visibility radius.
    step_distance = 0.900 if args.no_pokemon else 0.070
    locations = generate_location_steps(current_location, args.step_limit, step_distance)
    # In hex "spawns only" mode, filter out scan locations with no history of pokemons
    if args.spawnpoints_only and not args.no_pokemon:
        n, e, s, w = hex_bounds(current_location, args.step_limit)
        spawnpoints = set((d['latitude'], d['longitude']) for d in Pokemon.get_spawnpoints(s, w, n, e))
        if not spawnpoints:
            log.warning('No spawnpoints found in the specified area! (Did you forget to run a normal scan in this area first?)')

        def near_any_spawnpoint(coords):
            return any(geopy.distance.distance(coords, sp).meters <= 70 for sp in spawnpoints)

        locations = [c for c in locations if near_any_spawnpoint(c)]
    # Zeroed altitude and before/after timestamps to match the overseer's
    # expected structure.
    return [((loc[0], loc[1], 0), 0, 0) for loc in locations]
def get_sps_location_list(args, current_location, sps_scan_current):
    """Build the spawn-point scan schedule.

    Loads spawn points from the --spawnpoint-scanning JSON file (falling back
    to the database), converts each point's seconds-past-the-hour 'time' into
    real appear/leave timestamps, and returns them ordered by next appearance
    as [((lat, lng, alt), ts_appears, ts_leaves), ...].

    Raises:
        Exception: if no spawn points could be loaded at all.
    """
    locations = []
    # Attempt to load spawns from file
    if args.spawnpoint_scanning != 'nofile':
        log.debug('Loading spawn points from json file @ %s', args.spawnpoint_scanning)
        try:
            with open(args.spawnpoint_scanning) as file:
                locations = json.load(file)
        except ValueError as e:
            log.exception(e)
            log.error('JSON error: %s; will fallback to database', e)
        except IOError as e:
            log.error('Error opening json file: %s; will fallback to database', e)
    # No locations yet? Try the database!
    if not len(locations):
        log.debug('Loading spawn points from database')
        locations = Pokemon.get_spawnpoints_in_hex(current_location, args.step_limit)
    # Still nothing to scan: give up loudly.
    if not len(locations):
        # Fixed typo in the user-facing message ('availabe').
        raise Exception('No available spawn points!')
    # locations[]:
    # {"lat": 37.53079079414139, "lng": -122.28811690874117, "spawnpoint_id": "808f9f1601d", "time": 511
    log.info('Total of %d spawns to track', len(locations))
    locations.sort(key=itemgetter('time'))
    if args.very_verbose:
        for i in locations:
            sec = i['time'] % 60
            # Floor division keeps this an int on both Python 2 and 3 (the
            # module mixes py2 prints with the py3-style `queue` import).
            minute = (i['time'] // 60) % 60
            m = 'Scan [{:02}:{:02}] ({}) @ {},{}'.format(minute, sec, i['time'], i['lat'], i['lng'])
            log.debug(m)
    # 'time' from json and db alike has been munged to appearance time as seconds after the hour
    # Here we'll convert that to a real timestamp
    for location in locations:
        # For a scan which should cover all CURRENT pokemon, we can offset
        # the comparison time by 15 minutes so that the "appears" time
        # won't be rolled over to the next hour.
        # TODO: Make it work. The original logic (commented out) was producing
        #       bogus results if your first scan was in the last 15 minute of
        #       the hour. Wrapping my head around this isn't work right now,
        #       so I'll just drop the feature for the time being. It does need
        #       to come back so that repositioning/pausing works more nicely,
        #       but we can live without it too.
        # if sps_scan_current:
        #     cursec = (location['time'] + 900) % 3600
        # else:
        cursec = location['time']
        if cursec > cur_sec():
            # hasn't spawn in the current hour
            from_now = location['time'] - cur_sec()
            appears = now() + from_now
        else:
            # won't spawn till next hour
            late_by = cur_sec() - location['time']
            appears = now() + 3600 - late_by
        location['appears'] = appears
        # Spawns are assumed to last 15 minutes (900 s).
        location['leaves'] = appears + 900
    # Put the spawn points in order of next appearance time
    locations.sort(key=itemgetter('appears'))
    # Match expected structure:
    # locations = [((lat, lng, alt), ts_appears, ts_leaves),...]
    retset = []
    for location in locations:
        retset.append(((location['lat'], location['lng'], 40.32), location['appears'], location['leaves']))
    return retset
def search_worker_thread(args, account_queue, account_failures, search_items_queue, pause_bit, encryption_lib_path, status, dbq, whq):
    """Long-running scan worker.

    Repeatedly pulls `(step, (lat, lng, alt), appears_ts, leaves_ts)` items
    off `search_items_queue` and scans each location with the currently held
    account, reporting progress through the shared `status` dict.  When the
    account fails too often (or exceeds its search interval) it is pushed
    onto `account_failures` and a fresh account is taken from
    `account_queue`.  Parsed results are handed to the db (`dbq`) and
    webhook (`whq`) queues via parse_map()/parse_gyms().
    """
    log.debug('Search worker thread starting')

    # The outer forever loop restarts only when the inner one is intentionally exited - which should only be done when the worker is failing too often, and probably banned.
    # This reinitializes the API and grabs a new account from the queue.
    while True:
        try:
            status['starttime'] = now()

            # Get account
            status['message'] = 'Waiting to get new account from the queue'
            log.info(status['message'])
            account = account_queue.get()
            status['message'] = 'Switching to account {}'.format(account['username'])
            status['user'] = account['username']
            log.info(status['message'])
            stagger_thread(args, account)

            # New lease of life right here
            status['fail'] = 0
            status['success'] = 0
            status['noitems'] = 0
            status['skip'] = 0

            # Create the API instance this will use
            if args.mock != '':
                api = FakePogoApi(args.mock)
            else:
                api = PGoApi()

            if status['proxy_url']:
                log.debug("Using proxy %s", status['proxy_url'])
                api.set_proxy({'http': status['proxy_url'], 'https': status['proxy_url']})

            api.activate_signature(encryption_lib_path)

            # The forever loop for the searches
            while True:
                # If this account has been messing up too hard, let it rest
                if status['fail'] >= args.max_failures:
                    status['message'] = 'Account {} failed more than {} scans; possibly bad account. Switching accounts...'.format(account['username'], args.max_failures)
                    log.warning(status['message'])
                    account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'failures'})
                    break  # exit this loop to get a new account and have the API recreated

                # If this account has been running too long, let it rest
                if (args.account_search_interval is not None):
                    if (status['starttime'] <= (now() - args.account_search_interval)):
                        status['message'] = 'Account {} is being rotated out to rest.'.format(account['username'])
                        log.info(status['message'])
                        account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'rest interval'})
                        break

                # Block (busy-wait) while the whole scanner is paused.
                while pause_bit.is_set():
                    status['message'] = 'Scanning paused'
                    time.sleep(2)

                # Grab the next thing to search (when available)
                status['message'] = 'Waiting for item from queue'
                step, step_location, appears, leaves = search_items_queue.get()

                # too soon?
                if appears and now() < appears + 10:  # adding a 10 second grace period
                    first_loop = True
                    paused = False
                    while now() < appears + 10:
                        if pause_bit.is_set():
                            paused = True
                            break  # why can't python just have `break 2`...
                        remain = appears - now() + 10
                        status['message'] = 'Early for {:6f},{:6f}; waiting {}s...'.format(step_location[0], step_location[1], remain)
                        if first_loop:
                            log.info(status['message'])
                            first_loop = False
                        time.sleep(1)
                    if paused:
                        # Hand the item back to the queue accounting and
                        # go re-check the pause bit at the top of the loop.
                        search_items_queue.task_done()
                        continue

                # too late?
                if leaves and now() > (leaves - args.min_seconds_left):
                    search_items_queue.task_done()
                    status['skip'] += 1
                    # it is slightly silly to put this in status['message'] since it'll be overwritten very shortly after. Oh well.
                    status['message'] = 'Too late for location {:6f},{:6f}; skipping'.format(step_location[0], step_location[1])
                    log.info(status['message'])
                    # No sleep here; we've not done anything worth sleeping for. Plus we clearly need to catch up!
                    continue

                status['message'] = 'Searching at {:6f},{:6f}'.format(step_location[0], step_location[1])
                log.info(status['message'])

                # Let the api know where we intend to be for this loop
                api.set_position(*step_location)

                # Ok, let's get started -- check our login status
                check_login(args, account, api, step_location, status['proxy_url'])

                # Make the actual request (finally!)
                response_dict = map_request(api, step_location, args.jitter)

                # G'damnit, nothing back. Mark it up, sleep, carry on
                if not response_dict:
                    status['fail'] += 1
                    status['message'] = 'Invalid response at {:6f},{:6f}, abandoning location'.format(step_location[0], step_location[1])
                    log.error(status['message'])
                    time.sleep(args.scan_delay)
                    continue

                # Got the response, parse it out, send todo's to db/wh queues
                try:
                    parsed = parse_map(args, response_dict, step_location, dbq, whq)
                    search_items_queue.task_done()
                    status[('success' if parsed['count'] > 0 else 'noitems')] += 1
                    status['message'] = 'Search at {:6f},{:6f} completed with {} finds'.format(step_location[0], step_location[1], parsed['count'])
                    # A successful parse resets the consecutive-failure count.
                    status['fail'] = 0
                    log.debug(status['message'])
                except KeyError:
                    parsed = False
                    status['fail'] += 1
                    status['message'] = 'Map parse failed at {:6f},{:6f}, abandoning location. {} may be banned.'.format(step_location[0], step_location[1], account['username'])
                    log.exception(status['message'])

                # Get detailed information about gyms
                if args.gym_info and parsed:
                    # build up a list of gyms to update
                    gyms_to_update = {}
                    for gym in parsed['gyms'].values():
                        # Can only get gym details within 1km of our position
                        distance = calc_distance(step_location, [gym['latitude'], gym['longitude']])
                        if distance < 1:
                            # check if we already have details on this gym (if not, get them)
                            try:
                                record = GymDetails.get(gym_id=gym['gym_id'])
                            except GymDetails.DoesNotExist as e:
                                gyms_to_update[gym['gym_id']] = gym
                                continue

                            # if we have a record of this gym already, check if the gym has been updated since our last update
                            if record.last_scanned < gym['last_modified']:
                                gyms_to_update[gym['gym_id']] = gym
                                continue
                            else:
                                log.debug('Skipping update of gym @ %f/%f, up to date', gym['latitude'], gym['longitude'])
                                continue
                        else:
                            log.debug('Skipping update of gym @ %f/%f, too far away from our location at %f/%f (%fkm)', gym['latitude'], gym['longitude'], step_location[0], step_location[1], distance)

                    if len(gyms_to_update):
                        gym_responses = {}
                        current_gym = 1
                        status['message'] = 'Updating {} gyms for location {},{}...'.format(len(gyms_to_update), step_location[0], step_location[1])
                        log.debug(status['message'])

                        for gym in gyms_to_update.values():
                            status['message'] = 'Getting details for gym {} of {} for location {},{}...'.format(current_gym, len(gyms_to_update), step_location[0], step_location[1])
                            # Small randomized delay between per-gym requests.
                            time.sleep(random.random() + 2)
                            response = gym_request(api, step_location, gym)

                            # make sure the gym was in range. (sometimes the API gets cranky about gyms that are ALMOST 1km away)
                            if response['responses']['GET_GYM_DETAILS']['result'] == 2:
                                log.warning('Gym @ %f/%f is out of range (%dkm), skipping', gym['latitude'], gym['longitude'], distance)
                            else:
                                gym_responses[gym['gym_id']] = response['responses']['GET_GYM_DETAILS']

                            # increment which gym we're on (for status messages)
                            current_gym += 1

                        status['message'] = 'Processing details of {} gyms for location {},{}...'.format(len(gyms_to_update), step_location[0], step_location[1])
                        log.debug(status['message'])

                        if gym_responses:
                            parse_gyms(args, gym_responses, whq)

                # Always delay the desired amount after "scan" completion
                status['message'] += ', sleeping {}s until {}'.format(args.scan_delay, time.strftime('%H:%M:%S', time.localtime(time.time() + args.scan_delay)))
                time.sleep(args.scan_delay)

        # catch any process exceptions, log them, and continue the thread
        except Exception as e:
            status['message'] = 'Exception in search_worker using account {}. Restarting with fresh account. See logs for details.'.format(account['username'])
            time.sleep(args.scan_delay)
            log.error('Exception in search_worker under account {} Exception message: {}'.format(account['username'], e))
            account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'exception'})
def check_login(args, account, api, position, proxy_url):
    """Ensure `api` holds a valid login session for `account`.

    Returns immediately when the current auth ticket is still valid for
    more than 60 seconds.  Otherwise retries the login up to
    ``args.login_retries`` times, sleeping ``args.login_delay`` seconds
    between attempts.

    Args:
        args: parsed CLI options; reads login_retries, login_delay and
            scan_delay.
        account: dict with 'auth_service', 'username' and 'password'.
        api: the pgoapi instance to authenticate.
        position: (lat, lng, alt) tuple to report during authentication.
        proxy_url: optional proxy URL for the auth requests.

    Raises:
        TooManyLoginAttempts: when every login attempt failed.
    """

    # Logged in? Enough time left? Cool!
    if api._auth_provider and api._auth_provider._ticket_expire:
        remaining_time = api._auth_provider._ticket_expire / 1000 - time.time()
        if remaining_time > 60:
            log.debug('Credentials remain valid for another %f seconds', remaining_time)
            return

    # Try to login (a few times, but don't get stuck here).
    # BUG FIX: the old loop's "i >= args.login_retries" check inside the
    # except block was unreachable (the while condition guarantees
    # i < login_retries), so exhausting the retries fell through and was
    # logged as a successful login.  Use while/else so that running out
    # of attempts without a `break` raises TooManyLoginAttempts.
    api.set_position(position[0], position[1], position[2])
    attempts = 0
    while attempts < args.login_retries:
        try:
            if proxy_url:
                api.set_authentication(provider=account['auth_service'], username=account['username'], password=account['password'], proxy_config={'http': proxy_url, 'https': proxy_url})
            else:
                api.set_authentication(provider=account['auth_service'], username=account['username'], password=account['password'])
            break
        except AuthException:
            attempts += 1
            log.error('Failed to login to Pokemon Go with account %s. Trying again in %g seconds', account['username'], args.login_delay)
            time.sleep(args.login_delay)
    else:
        # Loop finished without `break`: every attempt raised AuthException.
        raise TooManyLoginAttempts('Exceeded login attempts')

    log.debug('Login for account %s successful', account['username'])
    time.sleep(args.scan_delay)
def map_request(api, position, jitter=False):
    """Issue one get_map_objects call at (or near) `position`.

    Returns the raw response dict, or False when the request failed.
    """
    # `position` is a tuple (immutable), so compute a separate scan
    # location rather than modifying it in place.
    if jitter:
        # Nudge the coordinates by a tiny random amount.
        scan_location = jitterLocation(position)
        log.debug('Jittered to: %f/%f/%f', scan_location[0], scan_location[1], scan_location[2])
    else:
        # Use the original coordinates unchanged.
        scan_location = position

    try:
        cell_ids = util.get_cell_ids(scan_location[0], scan_location[1])
        return api.get_map_objects(latitude=f2i(scan_location[0]),
                                   longitude=f2i(scan_location[1]),
                                   since_timestamp_ms=[0] * len(cell_ids),
                                   cell_id=cell_ids)
    except Exception as e:
        log.warning('Exception while downloading map: %s', e)
        return False
def gym_request(api, position, gym):
    """Fetch detailed information for a single gym near `position`.

    Returns the raw response dict, or False when the request failed.
    """
    try:
        log.debug('Getting details for gym @ %f/%f (%fkm away)', gym['latitude'], gym['longitude'], calc_distance(position, [gym['latitude'], gym['longitude']]))
        return api.get_gym_details(gym_id=gym['gym_id'],
                                   player_latitude=f2i(position[0]),
                                   player_longitude=f2i(position[1]),
                                   gym_latitude=gym['latitude'],
                                   gym_longitude=gym['longitude'])
    except Exception as e:
        log.warning('Exception while downloading gym details: %s', e)
        return False
def calc_distance(pos1, pos2):
    """Great-circle (haversine) distance in km between two (lat, lng) points."""
    earth_radius_km = 6378.1

    lat_delta = math.radians(pos1[0] - pos2[0])
    lng_delta = math.radians(pos1[1] - pos2[1])

    # Haversine formula: a is the squared half-chord length.
    haversine = (math.sin(lat_delta / 2) * math.sin(lat_delta / 2) +
                 math.cos(math.radians(pos1[0])) *
                 math.cos(math.radians(pos2[0])) *
                 math.sin(lng_delta / 2) * math.sin(lng_delta / 2))
    central_angle = 2 * math.atan2(math.sqrt(haversine), math.sqrt(1 - haversine))

    return earth_radius_km * central_angle
# Spread worker startups out so logins happen roughly one second apart.
def stagger_thread(args, account):
    """Sleep before startup, proportional to this account's position in
    args.accounts, with a little random fuzz."""
    position = args.accounts.index(account)
    if position == 0:
        # The first worker starts immediately.
        return
    delay = position + ((random.random() - .5) / 2)
    log.debug('Delaying thread startup for %.2f seconds', delay)
    time.sleep(delay)
class TooManyLoginAttempts(Exception):
    """Raised by check_login() when an account keeps failing to authenticate."""
    pass
|
xrobau/PoGoMap
|
pogom/search.py
|
Python
|
agpl-3.0
| 36,713
|
import json
from datetime import timedelta, datetime
from django.db import connection
from django.db.models.aggregates import Count
from django.contrib import messages
from django.utils.timezone import now
from django.views.generic import TemplateView
from django.template.defaultfilters import date as format_date
from django.template.defaultfilters import time as format_time
from croniter import croniter
from plugin_manager.launch_window.models import LaunchWindow
class Dashboard(TemplateView):
    """Render the dashboard page with an (initially empty) chart payload."""

    template_name = 'dashboard.html'

    def get_context_data(self, **kwargs):
        """Extend the base template context with 'chart_data'."""
        context = super(Dashboard, self).get_context_data(**kwargs)
        context.update(chart_data="")
        return context
|
ahharu/plugin-manager
|
plugin_manager/core/views.py
|
Python
|
mit
| 713
|
# NOTE(review): this file contains a top-level `return` and reads `filename`
# without defining it, so it is not a standalone module -- it looks like a
# code "cell" whose inputs (`filename`) and enclosing callable are injected
# by the host framework.  TODO confirm against the runtime that executes it.
import numpy as np

# One structured record per atom: xyz position, RGB color, draw radius.
atom_dtype = np.dtype([("position","float32", 3),
                       ("color","float32", 3),
                       ("radius", "float32")])

# Per-element radii in the PDB's length units (presumably Angstrom van der
# Waals radii -- TODO confirm); unknown elements fall back to 1.8 below.
radii = {
    "C" : 1.70,
    "O" : 1.52,
    "N" : 1.55,
    "H" : 1.20,
    "P" : 1.80,
    "S" : 1.80,
}
# Per-element display colors (RGB in 0..1); unknown elements get red.
colors = {
    "C" : [0.2, 1.0, 0.2],
    "O" : [1.0, 0.3, 0.3],
    "N" : [0.2, 0.2, 1.0],
    "H" : [0.9, 0.9, 0.9],
    "P" : [1.0, 0.5, 0.0],
    "S" : [0.9, 0.775, 0.25],
}

lines = open(filename).readlines()
atoms0 = []
for l in lines:
    # NOTE(review): a blank line would make `ll` empty and ll[0] raise
    # IndexError; assumes the input PDB has no blank lines -- verify.
    ll = l.split()
    if ll[0] not in ("ATOM" , "HETATM"):
        continue
    # Fixed-column PDB record fields (columns per the PDB format spec).
    atomname = l[12:16].strip()
    x = float(l[30:38])
    y = float(l[38:46])
    z = float(l[46:54])
    chain = l[21]           # parsed but unused below
    resnr = int(l[22:26])   # parsed but unused below
    resname = l[17:20].strip()  # parsed but unused below
    # First character of the atom name is taken as the element symbol;
    # NOTE(review): this misidentifies two-letter elements (e.g. Fe, Cl).
    element = atomname[0]
    atoms0.append( ((x,y,z),element) )

atoms = np.zeros(len(atoms0), dtype=atom_dtype)
atoms["position"] = np.array([a[0] for a in atoms0], dtype="float32")
for anr, a in enumerate(atoms0):
    _, element = a
    radius = radii.get(element, 1.8)
    atoms[anr]["radius"] = radius
    color = colors.get(element, [1,0,0])
    atoms[anr]["color"] = color

# Center the structure on its centroid.
atoms["position"] -= np.mean(atoms["position"],axis=0)
p = atoms["position"]
# Largest absolute coordinate over all axes (note: shadows builtins min/max).
min = np.abs(np.min(p,axis=0))
max = np.abs(np.max(p,axis=0))
bound = np.max(np.concatenate([min,max]))
bound /= 5.0 #scale to this
# Scale positions and radii so the furthest coordinate maps to 5 units.
p /= bound
atoms["radius"] /= bound
return atoms
|
sjdv1982/seamless
|
docs/archive/0.2-cleanup/3D/cell-load-pdb.py
|
Python
|
mit
| 1,407
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from PyQt4.QtGui import QScrollArea, QWidget, QHBoxLayout, QVBoxLayout, QFrame, QLabel, QImage, QPixmap
from weboob.capabilities.account import ICapAccount, StatusField
from weboob.tools.application.qt import QtDo
class Account(QFrame):
    """Widget showing the live status of one weboob backend account.

    If the backend supports ICapAccount, its get_account_status() is polled
    every 60 seconds and the returned StatusFields are rendered as HTML in
    the body label.
    """

    def __init__(self, weboob, backend, parent=None):
        QFrame.__init__(self, parent)
        self.setFrameShape(QFrame.StyledPanel)
        self.setFrameShadow(QFrame.Raised)
        self.weboob = weboob
        self.backend = backend
        self.setLayout(QVBoxLayout())
        self.timer = None

        head = QHBoxLayout()
        headw = QWidget()
        headw.setLayout(head)

        self.title = QLabel(u'<h1>%s — %s</h1>' % (backend.name, backend.DESCRIPTION))
        self.body = QLabel()

        if backend.ICON:
            self.icon = QLabel()
            img = QImage(backend.ICON)
            self.icon.setPixmap(QPixmap.fromImage(img))
            head.addWidget(self.icon)

        head.addWidget(self.title)
        head.addStretch()

        self.layout().addWidget(headw)

        if backend.has_caps(ICapAccount):
            self.body.setText(u'<i>Waiting...</i>')
            self.layout().addWidget(self.body)

            # Poll the account status every 60 seconds.
            self.timer = self.weboob.repeat(60, self.updateStats)

    def deinit(self):
        """Stop the periodic refresh; call before destroying the widget."""
        if self.timer is not None:
            self.weboob.stop(self.timer)

    def updateStats(self):
        """Kick off an asynchronous get_account_status() request."""
        self.process = QtDo(self.weboob, self.updateStats_cb, self.updateStats_eb)
        self.process.body = u''
        self.process.in_p = False
        self.process.do('get_account_status', backends=self.backend)

    def updateStats_cb(self, backend, field):
        """Accumulate one StatusField into the HTML body.

        A falsy `field` marks the end of the stream: the accumulated HTML
        is flushed into the body label.
        """
        if not field:
            if self.process.in_p:
                self.process.body += u"</p>"
            self.body.setText(self.process.body)
            self.process = None
            return

        if field.flags & StatusField.FIELD_HTML:
            # Trusted HTML from the backend: use as-is.
            value = u'%s' % field.value
        else:
            # BUG FIX: escape markup-significant characters.  The previous
            # code performed identity replacements (e.g. '&' -> '&') and
            # then ignored the escaped value in the non-TEXT branch below.
            value = (u'%s' % field.value).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')

        if field.flags & StatusField.FIELD_TEXT:
            # Stand-alone paragraph; close any open label/value paragraph.
            if self.process.in_p:
                self.process.body += u'</p>'
            self.process.body += u'<p>%s</p>' % value
            self.process.in_p = False
        else:
            # Label/value pair: group consecutive pairs in one paragraph.
            if not self.process.in_p:
                self.process.body += u"<p>"
                self.process.in_p = True
            else:
                self.process.body += u"<br />"
            # BUG FIX: use the escaped `value` instead of raw field.value.
            self.process.body += u'<b>%s</b>: %s' % (field.label, value)

    def updateStats_eb(self, backend, err, backtrace):
        """Error callback: show the failure and turn the title red."""
        self.body.setText(u'<b>Unable to connect:</b> %s' % unicode(err))
        self.title.setText(u'<font color=#ff0000>%s</font>' % unicode(self.title.text()))
class AccountsStatus(QScrollArea):
    """Scrollable container holding one Account widget per loaded backend."""

    def __init__(self, weboob, parent=None):
        QScrollArea.__init__(self, parent)
        self.weboob = weboob
        self.setFrameShadow(self.Plain)
        self.setFrameShape(self.NoFrame)
        self.setWidgetResizable(True)

        inner = QWidget(self)
        inner.setLayout(QVBoxLayout())
        inner.show()
        self.setWidget(inner)

    def load(self):
        """Rebuild the widget list from the currently loaded backends."""
        layout = self.widget().layout()

        # Remove every existing entry, shutting down its refresh timer
        # before scheduling the widget for deletion.
        while layout.count() > 0:
            item = layout.takeAt(0)
            widget = item.widget()
            if widget:
                widget.deinit()
                widget.hide()
                widget.deleteLater()

        for backend in self.weboob.iter_backends():
            layout.addWidget(Account(self.weboob, backend))

        layout.addStretch()
|
jocelynj/weboob
|
weboob/applications/qhavesex/status.py
|
Python
|
gpl-3.0
| 4,376
|
# sqlalchemy/interfaces.py
# Copyright (C) 2007-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2007 Jason Kirtland jek@discorporate.us
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Deprecated core event interfaces.
This module is **deprecated** and is superseded by the
event system.
"""
from . import event, util
class PoolListener(object):
    """Hooks into the lifecycle of connections in a :class:`.Pool`.

    .. note::

       :class:`.PoolListener` is deprecated.   Please
       refer to :class:`.PoolEvents`.

    Usage::

        class MyListener(PoolListener):
            def connect(self, dbapi_con, con_record):
                '''perform connect operations'''
            # etc.

        # create a new pool with a listener
        p = QueuePool(..., listeners=[MyListener()])

        # add a listener after the fact
        p.add_listener(MyListener())

        # usage with create_engine()
        e = create_engine("url://", listeners=[MyListener()])

    All of the standard connection :class:`~sqlalchemy.pool.Pool` types
    accept listeners for the key lifecycle events: creation, pool
    check-out and check-in.  There are no events fired when a connection
    closes.  For any given DB-API connection there will be one
    ``connect`` event, `n` ``checkout`` events, and either `n` or
    `n - 1` ``checkin`` events (a ``Connection`` detached via
    ``detach()`` is never checked back in).

    These are low-level events for low-level objects: raw Python DB-API
    connections, without the conveniences of the SQLAlchemy
    ``Connection`` wrapper, ``Dialect`` services or ``ClauseElement``
    execution.  If you execute SQL through the connection, explicitly
    closing all cursors and other resources is recommended.

    Events also receive a ``_ConnectionRecord``, a long-lived internal
    ``Pool`` object that basically represents a "slot" in the
    connection pool.  Its one public attribute of note is ``info``, a
    dictionary whose contents are scoped to the lifetime of the DB-API
    connection managed by the record, usable as shared storage.

    Subclassing ``PoolListener`` is optional: any object providing one
    or more of these methods can be used as a pool listener; the
    ``Pool`` inspects the listener and only registers the hooks it
    actually implements, so providing only the hooks you need is the
    most efficient choice.

    """

    @classmethod
    def _adapt_listener(cls, self, listener):
        """Adapt a :class:`.PoolListener` to individual
        :class:`event.Dispatch` events.

        """

        listener = util.as_interface(listener, methods=('connect',
                                'first_connect', 'checkout', 'checkin'))
        # Register each hook the (possibly partial) listener provides,
        # in the canonical order.
        for hook_name in ('connect', 'first_connect', 'checkout', 'checkin'):
            if hasattr(listener, hook_name):
                event.listen(self, hook_name, getattr(listener, hook_name))

    def connect(self, dbapi_con, con_record):
        """Called once for each new DB-API connection or Pool's ``creator()``.

        dbapi_con
          A newly connected raw DB-API connection (not a SQLAlchemy
          ``Connection`` wrapper).

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        """

    def first_connect(self, dbapi_con, con_record):
        """Called exactly once for the first DB-API connection.

        dbapi_con
          A newly connected raw DB-API connection (not a SQLAlchemy
          ``Connection`` wrapper).

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        """

    def checkout(self, dbapi_con, con_record, con_proxy):
        """Called when a connection is retrieved from the Pool.

        dbapi_con
          A raw DB-API connection

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        con_proxy
          The ``_ConnectionFairy`` which manages the connection for the span
          of the current checkout.

        If you raise an ``exc.DisconnectionError``, the current
        connection will be disposed and a fresh connection retrieved.
        Processing of all checkout listeners will abort and restart
        using the new connection.

        """

    def checkin(self, dbapi_con, con_record):
        """Called when a connection returns to the pool.

        Note that the connection may be closed, and may be None if the
        connection has been invalidated.  ``checkin`` will not be called
        for detached connections.  (They do not return to the pool.)

        dbapi_con
          A raw DB-API connection

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        """
class ConnectionProxy(object):
    """Allows interception of statement execution by Connections.

    .. note::

       :class:`.ConnectionProxy` is deprecated. Please
       refer to :class:`.ConnectionEvents`.

    Either or both of the ``execute()`` and ``cursor_execute()``
    may be implemented to intercept compiled statement and
    cursor level executions, e.g.::

        class MyProxy(ConnectionProxy):
            def execute(self, conn, execute, clauseelement,
                        *multiparams, **params):
                print "compiled statement:", clauseelement
                return execute(clauseelement, *multiparams, **params)

            def cursor_execute(self, execute, cursor, statement,
                               parameters, context, executemany):
                print "raw statement:", statement
                return execute(cursor, statement, parameters, context)

    The ``execute`` argument is a function that will fulfill the default
    execution behavior for the operation.  The signature illustrated
    in the example should be used.

    The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via
    the ``proxy`` argument::

        e = create_engine('someurl://', proxy=MyProxy())

    """

    @classmethod
    def _adapt_listener(cls, self, listener):
        # Bridge the legacy proxy interface onto the newer event system:
        # each adapter below re-creates the "default behavior" callable the
        # legacy hooks expect as their ``execute`` argument.

        def adapt_execute(conn, clauseelement, multiparams, params):

            # The legacy hook's ``execute`` callable: the event system
            # applies whatever (possibly modified) values are returned.
            def execute_wrapper(clauseelement, *multiparams, **params):
                return clauseelement, multiparams, params

            return listener.execute(conn, execute_wrapper,
                                    clauseelement, *multiparams,
                                    **params)

        event.listen(self, 'before_execute', adapt_execute)

        def adapt_cursor_execute(conn, cursor, statement,
                                 parameters, context, executemany):

            # Same echo-back pattern for the cursor-level hook.
            def execute_wrapper(
                cursor,
                statement,
                parameters,
                context,
            ):
                return statement, parameters

            return listener.cursor_execute(
                execute_wrapper,
                cursor,
                statement,
                parameters,
                context,
                executemany,
            )

        event.listen(self, 'before_cursor_execute', adapt_cursor_execute)

        def do_nothing_callback(*arg, **kw):
            pass

        # Transactional hooks only observe; their "default behavior"
        # callable is a no-op, and the wrapper preserves the original
        # function's metadata for introspection.
        def adapt_listener(fn):

            def go(conn, *arg, **kw):
                fn(conn, do_nothing_callback, *arg, **kw)

            return util.update_wrapper(go, fn)

        event.listen(self, 'begin', adapt_listener(listener.begin))
        event.listen(self, 'rollback',
                     adapt_listener(listener.rollback))
        event.listen(self, 'commit', adapt_listener(listener.commit))
        event.listen(self, 'savepoint',
                     adapt_listener(listener.savepoint))
        event.listen(self, 'rollback_savepoint',
                     adapt_listener(listener.rollback_savepoint))
        event.listen(self, 'release_savepoint',
                     adapt_listener(listener.release_savepoint))
        event.listen(self, 'begin_twophase',
                     adapt_listener(listener.begin_twophase))
        event.listen(self, 'prepare_twophase',
                     adapt_listener(listener.prepare_twophase))
        event.listen(self, 'rollback_twophase',
                     adapt_listener(listener.rollback_twophase))
        event.listen(self, 'commit_twophase',
                     adapt_listener(listener.commit_twophase))

    def execute(self, conn, execute, clauseelement, *multiparams, **params):
        """Intercept high level execute() events."""

        return execute(clauseelement, *multiparams, **params)

    def cursor_execute(self, execute, cursor, statement, parameters,
                       context, executemany):
        """Intercept low-level cursor execute() events."""

        return execute(cursor, statement, parameters, context)

    def begin(self, conn, begin):
        """Intercept begin() events."""

        return begin()

    def rollback(self, conn, rollback):
        """Intercept rollback() events."""

        return rollback()

    def commit(self, conn, commit):
        """Intercept commit() events."""

        return commit()

    def savepoint(self, conn, savepoint, name=None):
        """Intercept savepoint() events."""

        return savepoint(name=name)

    def rollback_savepoint(self, conn, rollback_savepoint, name, context):
        """Intercept rollback_savepoint() events."""

        return rollback_savepoint(name, context)

    def release_savepoint(self, conn, release_savepoint, name, context):
        """Intercept release_savepoint() events."""

        return release_savepoint(name, context)

    def begin_twophase(self, conn, begin_twophase, xid):
        """Intercept begin_twophase() events."""

        return begin_twophase(xid)

    def prepare_twophase(self, conn, prepare_twophase, xid):
        """Intercept prepare_twophase() events."""

        return prepare_twophase(xid)

    def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared):
        """Intercept rollback_twophase() events."""

        return rollback_twophase(xid, is_prepared)

    def commit_twophase(self, conn, commit_twophase, xid, is_prepared):
        """Intercept commit_twophase() events."""

        return commit_twophase(xid, is_prepared)
|
Drvanon/Game
|
venv/lib/python3.3/site-packages/sqlalchemy/interfaces.py
|
Python
|
apache-2.0
| 10,918
|
#!/usr/bin/env python
"""git_utils.py - Wrappers and utilities for interacting with Git
"""
from __future__ import absolute_import
from subprocess import Popen, STDOUT, PIPE, CalledProcessError
from collections import namedtuple
from functools import partial
from hashlib import sha1
from six.moves import filter
from six import iteritems
from stdci_libs import file_utils
import sys
if sys.version_info[0] >= 3:
from urllib.parse import urlsplit
else:
# python2 compatability
from urlparse import urlsplit
import logging
import os
logger = logging.getLogger(__name__)
class GitProcessError(CalledProcessError):
    """Raised when an invoked ``git`` command exits with a non-zero status."""
    pass
def commit_files(
    files, branch=None, commit_msg=None,
    add_change_id=True, add_headers=None, repo_dir='.', change_id_headers=None
):
    """Stage the given files and commit them if anything actually changed.

    :param list files:             paths to stage (missing paths are skipped)
    :param str branch:             if given, (re)create and switch to this
                                   branch before committing
    :param str commit_msg:         message body (w/o any header) for the commit
    :param bool add_change_id:     whether to append a Change-Id header
    :param dict add_headers:       extra headers appended to the commit message
    :param str repo_dir:           directory to stay in while running git
    :param list change_id_headers: additional header names that should carry
                                   the generated change id

    raises IOError when files to be committed cannot be accessed
    :rtype: list
    :returns: the list of files that were staged (and therefore committed)
    """
    if repo_dir and repo_dir.strip():
        file_utils.workdir(repo_dir.strip())

    id_headers = set(change_id_headers) if change_id_headers else set()
    if add_change_id:
        id_headers.add('Change-Id')

    # Unstage anything left over from a previous run -- but only when HEAD
    # exists ('git reset' fails in a repository with no commits yet).
    if len(git('log', '--oneline').splitlines()):
        git('reset', 'HEAD')

    git('add', *filter(os.path.exists, files))
    changed_files = staged_files()
    if not changed_files:
        return changed_files

    if branch:
        git('checkout', '-B', branch)
    git('commit', '-m', commit_message(
        changed_files, commit_msg, id_headers, add_headers
    ))
    return changed_files
def staged_files():
    """Return the list of file paths currently staged in the git index.

    :rtype: list
    """
    diff_output = git('diff', '--staged', '--name-only')
    return diff_output.splitlines()
def commit_message(
    changed_files=None, commit_message=None,
    change_id_headers=None, extra_headers=None
):
    """Build a full commit message: title/body plus trailing headers.

    :param list changed_files:     paths of changed files
    :param str commit_message:     message body (w/o headers); auto-generated
                                   from `changed_files` when empty
    :param list change_id_headers: header names that carry the change id
    :param list extra_headers:     extra headers (e.g. `Signed-off-by:`)

    :rtype: str
    :returns: the generated commit message
    """
    message = str(commit_message).strip() if commit_message else ''
    if not message:
        # No usable body given: synthesize a title and a file listing.
        message = commit_title(changed_files)
        if len(changed_files) > 1:
            message += '\n\nChanged files:\n'
            message += '\n'.join(
                '- ' + fil for fil in changed_files
            )
    headers = commit_headers(
        changed_files, change_id_headers, extra_headers
    )
    if headers:
        message += '\n'
        message += headers
    return message
def commit_title(changed_files, max_title=60):
    """Pick a short, human-readable commit title.

    Prefers 'Changed: <path>'; falls back to the path's basename and
    finally to a generic title when even that exceeds `max_title`.

    :param list changed_files: file paths of changed files
    :param int max_title:      maximum allowed title length

    :rtype: str
    :returns: the commit title
    """
    if len(changed_files) != 1:
        return 'Changed {} files'.format(len(changed_files))
    the_file = changed_files[0]
    for candidate in (
        'Changed: {}'.format(the_file),
        'Changed: {}'.format(os.path.basename(the_file)),
    ):
        if len(candidate) <= max_title:
            return candidate
    return 'Changed one file'
def commit_headers(changed_files, change_id_headers, extra_headers):
    """Render the commit-message headers, one per line (each preceded by
    a newline; returns '' when there is nothing to add).

    :param list changed_files:     file paths of changed files
    :param list change_id_headers: header names that carry the change id
    :param list extra_headers:     extra headers (e.g. `Signed-off-by:`)

    :rtype: str
    :returns: the rendered headers
    """
    headers = ''
    if extra_headers:
        for header_name, header_value in sorted(iteritems(extra_headers)):
            headers += '\n{}: {}'.format(header_name, header_value)
    if changed_files and change_id_headers:
        change_id = 'I' + files_checksum(changed_files)
        change_id_requested = False
        for header_name in sorted(set(change_id_headers)):
            if header_name == 'Change-Id':
                change_id_requested = True
            else:
                headers += '\n{}: {}'.format(header_name, change_id)
        # Gerrit requires 'Change-Id' to sit on the very last line of the
        # commit message, so it is emitted after every other header.
        if change_id_requested:
            headers += '\nChange-Id: {}'.format(change_id)
    return headers
def files_checksum(changed_files):
    """Compute a sha1 over file names and contents (used as a Change-Id).

    Duplicates are collapsed and paths are processed in sorted order, so
    the digest depends only on the set of (path, content) pairs.

    :param list changed_files: paths of changed files

    :rtype: str
    :returns: hex digest representing the changed files
    """
    digest = sha1()
    for file_path in sorted(set(changed_files)):
        digest.update(file_path.encode('utf-8'))
        with open(file_path, 'rb') as file_obj:
            digest.update(file_obj.read())
    return digest.hexdigest()
def git(*args, **kwargs):
    """Run a git command and return its decoded output.

    :param list args:           git command-line arguments
    :param bool append_stderr:  when True, merge STDERR into the output

    :raises GitProcessError: if git exits with a non-zero status
    :rtype: str
    :returns: the command's output
    """
    command = ['git'] + list(args)
    stderr_target = STDOUT if kwargs.get('append_stderr', False) else PIPE
    logger.info("Executing command: '%s'", ' '.join(command))
    process = Popen(command, stdout=PIPE, stderr=stderr_target)
    output, error = process.communicate()
    retcode = process.poll()
    # stderr is None when it was merged into stdout via STDOUT above.
    error = '' if error is None else error.decode('utf-8')
    output = output.decode('utf-8')
    logger.debug('Git exited with status: %d', retcode, extra={'blocks': (
        ('stderr', error), ('stdout', output)
    )},)
    if retcode:
        raise GitProcessError(retcode, command)
    return output
class InvalidGitRef(Exception):
    """Raised when a string cannot be resolved as a Git ref.

    The offending ref is kept on the ``ref`` attribute.
    """

    def __init__(self, message, ref):
        self.ref = ref
        super(InvalidGitRef, self).__init__(message)
def git_rev_parse(ref, git_func=git):
    """Resolve a git ref (branch name, tag, etc.) to its commit hash.

    :param str ref:            a git commit reference to parse
    :param Callable git_func:  (optional) a git function to use instead of
                               the default one: git
    :raises InvalidGitRef: when git cannot resolve the given ref
    :rtype: str
    """
    commit_spec = "{0}^{{commit}}".format(ref)
    try:
        return git_func('rev-parse', commit_spec).rstrip()
    except GitProcessError as e:
        # git signals an unresolvable ref with exit status 128; anything
        # else is an unrelated failure and is re-raised untouched.
        if e.returncode != 128:
            raise
        raise InvalidGitRef(
            "Invalid Git ref given: '{0}'".format(ref), ref
        )
def get_repo_root():
    """Return the absolute path to the root of the current repository.

    :rtype: str
    """
    toplevel = git("rev-parse", "--show-toplevel").strip()
    return os.path.abspath(toplevel)
def prep_git_repo(path, url, refspec=None, checkout=False):
    """Initialize a git repository at `path` with `url` as its origin remote.

    :param str path: directory in which to initialize the repository
    :param str url: set as the 'origin' remote for pulling and pushing
    :param str refspec: if given, fetch it into a local branch of the same
                        purpose ('myhead')
    :param bool checkout: if True, also check out the fetched refspec;
                          ignored when `refspec` is not given
    :raises GitProcessError: if any git invocation fails
    :returns: a 2-tuple: a git function bound to the new repository, and the
              sha of the fetched refspec ('' when no refspec was fetched)
    """
    local_ref_name = 'myhead'
    root_path = os.path.realpath(str(path))
    git('init', root_path)
    # bind all further git calls to the freshly created repository
    repo_git = partial(
        git,
        '--git-dir={0}'.format(os.path.join(root_path, ".git")),
        '--work-tree={0}'.format(root_path)
    )
    repo_git('remote', 'add', 'origin', url)
    if not refspec:
        return repo_git, ''
    logger.info('will fetch {}'.format(refspec))
    repo_git('fetch', 'origin', '+{0}:{1}'.format(refspec, local_ref_name))
    if checkout:
        repo_git('checkout', local_ref_name)
    return repo_git, git_rev_parse(local_ref_name, repo_git)
class CouldNotParseRepoURL(Exception):
    """Raised when a repository name cannot be extracted from a repo URL."""
def get_name_from_repo_url(repo_url):
    """Extract the repository name from a repository URL.

    :param str repo_url: the URL of the repository
    :raises CouldNotParseRepoURL: when no repo name can be extracted from the
                                  URL (note: NOT ValueError)
    :returns: the repo name
    """
    url_path = urlsplit(repo_url).path
    # splitext always returns a 2-tuple, even for an empty path, so this is
    # safe; it strips a trailing extension such as ".git"
    base, _ = os.path.splitext(url_path)
    repo_name = os.path.basename(base)
    if not repo_name:
        raise CouldNotParseRepoURL(
            'could not parse repo name from repo url: {0}'.format(repo_url)
        )
    logger.info('parsed repo name from url: %s', repo_name)
    return repo_name
|
oVirt/jenkins
|
stdci_libs/git_utils.py
|
Python
|
gpl-3.0
| 10,074
|
# Build mutagenized Cys-tRNA constructs with two alternative anticodons.
# (Fixed: the original used Python 2 `print x, y` statements, which are a
# syntax error under Python 3; `print(a, b)` produces identical output.)

# 5' and 3' halves of the tRNA body (anticodon excluded).
trna5 = 'GGCGCGTTAACAAAGCGGTTATGTAGCGGATT'
trna3 = 'AATCCGTCTAGTCCGGTTCGACTCCGGAACGCGCCTCCA'
# Candidate anticodons: R variant and D variant.
trnaAC_R = 'tcg'
trnaAC_D = 'gtc'
# Cloning flanks added around the full tRNA sequence.
flank5 = 'cccctctaga'
flank3 = 'tgggaaagataag'
# Base-to-code substituted versions of the tRNA halves.
# NOTE(review): mut_trna5/mut_trna3 are computed but never used below —
# confirm whether they were meant to go into the printed constructs.
mut_trna5 = trna5.replace('G', '(02029402)').replace('C', '(02940202)').replace('T', '(02020294)').replace('A', '(94020202)')
mut_trna3 = trna3.replace('G', '(02029402)').replace('C', '(02940202)').replace('T', '(02020294)').replace('A', '(94020202)')
# Full constructs: flank + tRNA 5' half + anticodon + tRNA 3' half + flank.
mut_trna_R = flank5 + trna5 + trnaAC_R + trna3 + flank3
mut_trna_D = flank5 + trna5 + trnaAC_D + trna3 + flank3
print('R:\n', mut_trna_R)
print('D:\n', mut_trna_D)
|
jgcwell/python_jenna
|
Cys tRNA sequence mutagenizer.py
|
Python
|
mit
| 566
|
# Unit tests for Spanish verb conjugation
import unittest
import languages.spanish as Spanish
from functools import partial
class TestSpanishConjugator(unittest.TestCase):
    """Conjugation checks for one regular verb of each class (-ar, -er, -ir)
    across the simple and present-perfect tenses.

    Fixes over the previous version:
    * test_present_with_pronouns wrapped the whole verb loop in a single
      subTest(), so the first failing verb hid failures on the others; the
      subTest is now per verb.
    * run_sub_tests used a list comprehension purely for its assertEqual
      side effects; it is now a plain loop.
    """
    @classmethod
    def setUpClass(cls):
        # one regular representative per conjugation class
        cls.test_verbs = ['hablar', 'vender', 'vivir']
    def test_present(self):
        expected = [['hablo', 'hablas', 'hablás', 'habla',
                     'hablamos', 'habláis', 'hablan'],
                    ['vendo', 'vendes', 'vendés', 'vende',
                     'vendemos', 'vendéis', 'venden'],
                    ['vivo', 'vives', 'vivís', 'vive',
                     'vivimos', 'vivís', 'viven']]
        self.run_sub_tests(expected, 'presente')
    def test_present_with_pronouns(self):
        expected = [['yo hablo',
                     'tú hablas', 'vos hablás',
                     'él/ella/usted habla',
                     'nosotros/nosotras hablamos',
                     'vosotros/vosotras habláis',
                     'ellos/ellas/ustedes hablan'],
                    ['yo vendo',
                     'tú vendes', 'vos vendés',
                     'él/ella/usted vende',
                     'nosotros/nosotras vendemos',
                     'vosotros/vosotras vendéis',
                     'ellos/ellas/ustedes venden'],
                    ['yo vivo',
                     'tú vives', 'vos vivís',
                     'él/ella/usted vive',
                     'nosotros/nosotras vivimos',
                     'vosotros/vosotras vivís',
                     'ellos/ellas/ustedes viven']]
        # one subTest per verb so a failure on one verb does not abort the
        # checks for the remaining verbs
        for i, verb in enumerate(TestSpanishConjugator.test_verbs):
            with self.subTest(verb=verb):
                conj = Spanish.construct_inflection(verb, 'presente')
                actual = [Spanish._STD_FORMAT.format(*item) for item in conj]
                self.assertEqual(expected[i], actual)
    def test_imperfect(self):
        expected = [['hablaba', 'hablabas', 'hablabas', 'hablaba',
                     'hablábamos', 'hablabais', 'hablaban'],
                    ['vendía', 'vendías', 'vendías', 'vendía',
                     'vendíamos', 'vendíais', 'vendían'],
                    ['vivía', 'vivías', 'vivías', 'vivía',
                     'vivíamos', 'vivíais', 'vivían']]
        self.run_sub_tests(expected, 'pretérito imperfecto')
    def test_future(self):
        expected = [['hablaré', 'hablarás', 'hablarás', 'hablará',
                     'hablaremos', 'hablaréis', 'hablarán'],
                    ['venderé', 'venderás', 'venderás', 'venderá',
                     'venderemos', 'venderéis', 'venderán'],
                    ['viviré', 'vivirás', 'vivirás', 'vivirá',
                     'viviremos', 'viviréis', 'vivirán']]
        self.run_sub_tests(expected, 'futuro simple')
    def test_preterit(self):
        expected = [['hablé', 'hablaste(s)', 'hablaste(s)', 'habló',
                     'hablamos', 'hablasteis', 'hablaron'],
                    ['vendí', 'vendiste(s)', 'vendiste(s)', 'vendió',
                     'vendimos', 'vendisteis', 'vendieron'],
                    ['viví', 'viviste(s)', 'viviste(s)', 'vivió',
                     'vivimos', 'vivisteis', 'vivieron']
                    ]
        self.run_sub_tests(expected, 'pretérito indefinido')
    def run_sub_tests(self, expected, tense):
        """Compare the expected forms to the conjugator's output for every
        test verb, one subTest per (verb, tense) pair.

        NOTE(review): zip() truncates to the shorter sequence, so a conjugator
        returning too few forms would silently skip assertions — confirm the
        conjugator always yields all seven forms.
        """
        conj = partial(Spanish.construct_stem_and_ending, tense=tense)
        for i, verb in enumerate(TestSpanishConjugator.test_verbs):
            with self.subTest(verb=verb, tense=tense):
                for expect, actual in zip(expected[i], conj(verb)):
                    self.assertEqual(expect, actual)
    def test_present_perfect(self):
        expected = [['he hablado', 'has hablado', 'has hablado', 'ha hablado',
                     'hemos hablado', 'habéis hablado', 'han hablado'],
                    ['he vendido', 'has vendido', 'has vendido', 'ha vendido',
                     'hemos vendido', 'habéis vendido', 'han vendido'],
                    ['he vivido', 'has vivido', 'has vivido', 'ha vivido',
                     'hemos vivido', 'habéis vivido', 'han vivido']]
        self.run_sub_tests(expected, 'pretérito perfecto')
class TestcompoundTenses(unittest.TestCase):
    """Checks for the building blocks of the compound tenses."""

    def test_past_participle(self):
        # regular participles: -ar -> -ado, -er/-ir -> -ido
        verbs = ['hablar', 'vender', 'vivir']
        expected = ['hablado', 'vendido', 'vivido']
        actual = [Spanish._construct_past_participle(v) for v in verbs]
        self.assertEqual(expected, actual)
|
wmealem/VerbTrainer
|
tests/test_spanish_conjugator.py
|
Python
|
mit
| 4,633
|
import datetime
import logging
from django.conf import settings
from django.apps import apps
from django.db import models, connections
from django.dispatch import receiver
from django.db.models.signals import post_save
from google.appengine.api import datastore
from google.appengine.ext import deferred
from djangae.db import transaction
from djangae.fields import RelatedSetField
from djangae.contrib.mappers.pipes import MapReduceTask, DjangaeMapperPipeline, PIPELINE_BASE_PATH
from djangae.db.utils import django_instance_to_entity
from djangae.db.unique_utils import unique_identifiers_from_entity
from djangae.db.constraints import UniqueMarker
from djangae.db.caching import disable_cache
logger = logging.getLogger(__name__)
# (value, label) choices for UniqueAction.action_type.
ACTION_TYPES = [
    ('check', 'Check'), # Verify all models unique constraint markers exist and are assigned to it.
    ('repair', 'Repair'), # Recreate any missing markers
    ('clean', 'Clean'), # Remove any marker that isn't properly linked to an instance.
]
# Lifecycle states of a UniqueAction run; flipped to 'done' by the mapper's
# finish callback.
ACTION_STATUSES = [
    ('running', 'Running'),
    ('done', 'Done'),
]
# (value, message) choices for ActionLog.log_type.
LOG_MSGS = [
    ('missing_marker', "Marker for the unique constraint is missing"),
    ('missing_instance', "Unique constraint marker exists, but doesn't point to the instance"),
    ('already_assigned', "Marker is assigned to a different instance already"),
    ('old_instance_key', "Marker was created when instance was a StringProperty")
]
# Stop recording ActionLog entries for an action once this many exist.
MAX_ERRORS = 100
def encode_model(model):
    """Serialize a model class to its "app_label,model_name" string form."""
    meta = model._meta
    return "{0},{1}".format(meta.app_label, meta.model_name)
def decode_model(model_str):
    """Inverse of encode_model(): return the model class for an
    "app_label,model_name" string."""
    app_label, model_name = model_str.split(',')
    return apps.get_model(app_label, model_name)
class ActionLog(models.Model):
    """One inconsistency found by a UniqueAction run."""
    # datastore key of the affected model instance
    instance_key = models.TextField()
    # datastore key of the unique-marker entity involved
    marker_key = models.CharField(max_length=500)
    # one of LOG_MSGS, describing the kind of inconsistency
    log_type = models.CharField(max_length=255, choices=LOG_MSGS)
    action = models.ForeignKey('UniqueAction')
class UniqueAction(models.Model):
    """A check/repair/clean run over the unique markers of one model."""
    # one of ACTION_TYPES
    action_type = models.CharField(choices=ACTION_TYPES, max_length=100)
    # encoded "app_label,model_name" string (see encode_model/decode_model)
    model = models.CharField(max_length=100)
    # database alias the action runs against
    db = models.CharField(max_length=100, default='default')
    # one of ACTION_STATUSES; set to "done" by the mapper's finish callback
    status = models.CharField(choices=ACTION_STATUSES, default=ACTION_STATUSES[0][0], editable=False, max_length=100)
    # log entries recorded during the run (capped at MAX_ERRORS)
    logs = RelatedSetField(ActionLog, editable=False)
def _log_action(action_id, log_type, instance_key, marker_key):
    """Create an ActionLog and attach it to the UniqueAction, atomically.

    Defined at module level (rather than inline in log()) so it can be
    pickled by deferred.defer().
    """
    # cross-group transaction: it touches both the UniqueAction and the
    # newly created ActionLog entity groups
    @transaction.atomic(xg=True)
    def _atomic(action_id, log_type, instance_key, marker_key):
        action = UniqueAction.objects.get(pk=action_id)
        # cap the number of recorded problems per action
        if len(action.logs) > MAX_ERRORS:
            return
        log = ActionLog.objects.create(
            action_id=action_id,
            log_type=log_type,
            instance_key=instance_key,
            marker_key=marker_key)
        action.logs.add(log)
        action.save()
    _atomic(action_id, log_type, instance_key, marker_key)
def log(action_id, log_type, instance_key, marker_key, defer=True):
    """Shorthand for creating an ActionLog.

    deferred.defer cannot take an inline or transaction-wrapped function
    directly, so the deferred path goes through the module-level helper
    _log_action, which wraps the transactionally decorated one.
    """
    if not defer:
        _log_action(action_id, log_type, instance_key, marker_key)
        return
    deferred.defer(_log_action, action_id, log_type, instance_key, marker_key)
@receiver(post_save, sender=UniqueAction)
def start_action(sender, instance, created, raw, **kwargs):
    """Kick off the matching mapper when a new UniqueAction is saved.

    Saves of an existing action (e.g. flipping its status to "done") must
    not restart the mapreduce, hence the `created` guard.  `raw` is part of
    the post_save signal signature and is intentionally unused here.
    """
    if not created:  # idiomatic truthiness check (was `created == False`)
        # we are saving because status is now "done"?
        return
    # use a distinct name instead of shadowing the signal's **kwargs
    task_kwargs = dict(
        action_pk=instance.pk,
    )
    if instance.action_type == "clean":
        task_kwargs.update(model=instance.model)
        CleanMapper(db=instance.db).start(**task_kwargs)
    else:
        task_kwargs.update(repair=instance.action_type == "repair")
        CheckRepairMapper(model=decode_model(instance.model), db=instance.db).start(**task_kwargs)
def _finish(*args, **kwargs):
    """Mark the UniqueAction identified by kwargs['action_pk'] as done."""
    action_pk = kwargs.get('action_pk')

    @transaction.atomic
    def _mark_done():
        action = UniqueAction.objects.get(pk=action_pk)
        action.status = "done"
        action.save()

    _mark_done()
class RawMapperMixin(object):
    """Mixin that starts a mapreduce over raw datastore entities of
    `self.kind` instead of Django model instances."""
    def get_model_app_(self):
        # raw mappers iterate a datastore kind, not a Django model
        return None
    def start(self, *args, **kwargs):
        """Launch a DjangaeMapperPipeline over the raw datastore kind."""
        # make the target db alias available to the map function
        kwargs['db'] = self.db
        mapper_parameters = {
            'entity_kind': self.kind,
            'keys_only': False,
            'kwargs': kwargs,
            'args': args,
            # map over the namespace configured for the chosen db alias
            'namespace': settings.DATABASES.get(self.db, {}).get('NAMESPACE'),
        }
        mapper_parameters['_map'] = self.get_relative_path(self.map)
        pipe = DjangaeMapperPipeline(
            self.job_name,
            'djangae.contrib.mappers.thunks.thunk_map',
            'mapreduce.input_readers.RawDatastoreInputReader',
            params=mapper_parameters,
            shards=self.shard_count
        )
        pipe.start(base_path=PIPELINE_BASE_PATH)
class CheckRepairMapper(MapReduceTask):
    """Maps over all instances of a model and checks (or, in repair mode,
    recreates/fixes) their unique constraint markers."""
    name = 'action_mapper'
    kind = '_djangae_unique_marker'
    def start(self, *args, **kwargs):
        # make the db alias available to map()
        kwargs['db'] = self.db
        return super(CheckRepairMapper, self).start(*args, **kwargs)
    @staticmethod
    def finish(*args, **kwargs):
        # mark the triggering UniqueAction as done
        _finish(*args, **kwargs)
    @staticmethod
    def map(instance, *args, **kwargs):
        """ Figure out what markers the instance should use and verify they're attached to
        this instance. Log any weirdness and in repair mode - recreate missing markers. """
        action_id = kwargs.get("action_pk")
        repair = kwargs.get("repair")
        alias = kwargs.get("db", "default")
        namespace = settings.DATABASES.get(alias, {}).get("NAMESPACE")
        assert alias == (instance._state.db or "default")
        # raw datastore entity equivalent of the instance
        entity = django_instance_to_entity(connections[alias], type(instance), instance._meta.fields, raw=True, instance=instance, check_null=False)
        identifiers = unique_identifiers_from_entity(type(instance), entity, ignore_pk=True)
        identifier_keys = [datastore.Key.from_path(UniqueMarker.kind(), i, namespace=namespace) for i in identifiers]
        # bulk get; result list is aligned with identifier_keys, missing -> None
        markers = datastore.Get(identifier_keys)
        instance_key = str(entity.key())
        markers_to_save = []
        for i, m in zip(identifier_keys, markers):
            marker_key = str(i)
            if m is None:
                # Missing marker
                if repair:
                    new_marker = datastore.Entity(UniqueMarker.kind(), name=i.name(), namespace=namespace)
                    new_marker['instance'] = entity.key()
                    new_marker['created'] = datetime.datetime.now()
                    markers_to_save.append(new_marker)
                else:
                    log(action_id, "missing_marker", instance_key, marker_key)
            elif 'instance' not in m or not m['instance']:
                # Marker with missing instance attribute
                if repair:
                    m['instance'] = entity.key()
                    markers_to_save.append(m)
                else:
                    log(action_id, "missing_instance", instance_key, marker_key)
            elif m['instance'] != entity.key():
                # old-style markers stored the instance key as a string
                if isinstance(m['instance'], basestring):
                    m['instance'] = datastore.Key(m['instance'])
                if repair:
                    markers_to_save.append(m)
                else:
                    log(action_id, "old_instance_key", instance_key, marker_key)
                if m['instance'] != entity.key():
                    # Marker already assigned to a different instance
                    log(action_id, "already_assigned", instance_key, marker_key)
                    # Also log in repair mode as repairing would break the other instance.
        if markers_to_save:
            datastore.Put(markers_to_save)
class CleanMapper(RawMapperMixin, MapReduceTask):
    """Maps over all raw unique-marker entities and deletes stale ones."""
    name = 'action_clean_mapper'
    kind = '_djangae_unique_marker'
    @staticmethod
    def finish(*args, **kwargs):
        # mark the triggering UniqueAction as done
        _finish(*args, **kwargs)
    @staticmethod
    def map(entity, model, *args, **kwargs):
        """ The Clean mapper maps over all UniqueMarker instances. """
        alias = kwargs.get("db", "default")
        namespace = settings.DATABASES.get(alias, {}).get("NAMESPACE", "")
        # `model` arrives as an encoded "app_label,model_name" string
        model = decode_model(model)
        if not entity.key().id_or_name().startswith(model._meta.db_table + "|"):
            # Only include markers which are for this model
            return
        assert namespace == entity.namespace()
        # bypass djangae caching: we need the real datastore state
        with disable_cache():
            # At this point, the entity is a unique marker that is linked to an instance of 'model', now we should see if that instance exists!
            instance_id = entity["instance"].id_or_name()
            try:
                instance = model.objects.using(alias).get(pk=instance_id)
            except model.DoesNotExist:
                logger.info("Deleting unique marker %s because the associated instance no longer exists", entity.key().id_or_name())
                datastore.Delete(entity)
                return
            # Get the possible unique markers for the entity, if this one doesn't exist in that list then delete it
            instance_entity = django_instance_to_entity(connections[alias], model, instance._meta.fields, raw=True, instance=instance, check_null=False)
            identifiers = unique_identifiers_from_entity(model, instance_entity, ignore_pk=True)
            identifier_keys = [datastore.Key.from_path(UniqueMarker.kind(), i, namespace=entity["instance"].namespace()) for i in identifiers]
            if entity.key() not in identifier_keys:
                logger.info("Deleting unique marker %s because the it no longer represents the associated instance state", entity.key().id_or_name())
                datastore.Delete(entity)
|
Ali-aqrabawi/ezclinic
|
lib/djangae/contrib/uniquetool/models.py
|
Python
|
mit
| 9,821
|
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_blueprint_virtnet
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS blueprint parameter values
deprecated:
removed_in: "2.9"
why: This module does not support AOS 2.1 or later
alternative: See new modules at U(https://www.ansible.com/ansible-apstra).
description:
- Apstra AOS Blueprint Virtual Network module let you manage your Virtual Network easily.
You can create access, define and delete Virtual Network by name or by using a JSON / Yaml file.
This module is idempotent and support the I(check) mode. It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
blueprint:
description:
- Blueprint Name or Id as defined in AOS.
required: True
name:
description:
- Name of Virtual Network as part of the Blueprint.
content:
description:
- Datastructure of the Virtual Network to manage. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned on success in I(value).
state:
description:
- Indicate what is the expected state of the Virtual Network (present or not).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Access Existing Virtual Network"
aos_blueprint_virtnet:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "my-virtual-network"
state: present
- name: "Delete Virtual Network with JSON File"
aos_blueprint_virtnet:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
content: "{{ lookup('file', 'resources/virtual-network-02.json') }}"
state: absent
- name: "Create Virtual Network"
aos_blueprint_virtnet:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
content: "{{ lookup('file', 'resources/virtual-network-02.json') }}"
state: present
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
def ensure_present(module, aos, blueprint, virtnet):
    """Create the virtual network if it does not exist yet and report the
    resulting state.  `aos` is unused here but kept for a uniform signature
    with ensure_absent().  Note: module.exit_json() terminates the module,
    so the early exit below ends the run for an already-existing network.
    """
    if virtnet.exists:
        # already there: nothing to change
        module.exit_json(changed=False,
                         blueprint=blueprint.name,
                         name=virtnet.name,
                         id=virtnet.id,
                         value=virtnet.value)
    if not module.check_mode:
        try:
            virtnet.create(module.params['content'])
        except Exception as e:
            module.fail_json(msg="unable to create virtual-network : %s" % to_native(e))
    module.exit_json(changed=True,
                     blueprint=blueprint.name,
                     name=virtnet.name,
                     id=virtnet.id,
                     value=virtnet.value)
def ensure_absent(module, aos, blueprint, virtnet):
    """Delete the virtual network if it exists and report whether a change
    happened.  module.exit_json() terminates the module run.
    """
    if not virtnet.exists:
        # nothing to delete
        module.exit_json(changed=False,
                         blueprint=blueprint.name)
    if not module.check_mode:
        try:
            virtnet.delete()
        except Exception as e:
            module.fail_json(msg="unable to delete virtual-network %s : %s" % (virtnet.name, to_native(e)))
    module.exit_json(changed=True,
                     blueprint=blueprint.name)
def blueprint_virtnet(module):
    """Resolve the AOS session, blueprint and virtual network from the module
    parameters, then delegate to ensure_present()/ensure_absent()."""
    margs = module.params
    # --------------------------------------------------------------------
    # Get AOS session object based on Session Info
    # --------------------------------------------------------------------
    try:
        aos = get_aos_session(module, margs['session'])
    except Exception:
        module.fail_json(msg="Unable to login to the AOS server")
    # --------------------------------------------------------------------
    # Get the blueprint Object based on either name or ID
    # --------------------------------------------------------------------
    try:
        blueprint = find_collection_item(aos.Blueprints,
                                         item_name=margs['blueprint'],
                                         item_id=margs['blueprint'])
    except Exception:
        module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
    if blueprint.exists is False:
        module.fail_json(msg='Blueprint %s does not exist.\n'
                             'known blueprints are [%s]' %
                             (margs['blueprint'], ','.join(aos.Blueprints.names)))
    # --------------------------------------------------------------------
    # Convert "content" to dict and extract name
    # --------------------------------------------------------------------
    # NOTE(review): if both 'content' and 'name' were None, item_name would be
    # unbound below; the AnsibleModule required_one_of constraint in main()
    # prevents that combination from reaching this point.
    if margs['content'] is not None:
        content = content_to_dict(module, margs['content'])
        if 'display_name' in content.keys():
            item_name = content['display_name']
        else:
            module.fail_json(msg="Unable to extract 'display_name' from 'content'")
    elif margs['name'] is not None:
        item_name = margs['name']
    # --------------------------------------------------------------------
    # Try to find VirtualNetwork object
    # --------------------------------------------------------------------
    try:
        virtnet = blueprint.VirtualNetworks[item_name]
    except Exception:
        module.fail_json(msg="Something went wrong while trying to find Virtual Network %s in blueprint %s"
                             % (item_name, blueprint.name))
    # --------------------------------------------------------------------
    # Proceed based on State value
    # --------------------------------------------------------------------
    if margs['state'] == 'absent':
        ensure_absent(module, aos, blueprint, virtnet)
    elif margs['state'] == 'present':
        ensure_present(module, aos, blueprint, virtnet)
def main():
    """Module entry point: build the argument spec, validate aos-pyez, and
    dispatch to blueprint_virtnet()."""
    argument_spec = dict(
        session=dict(required=True, type="dict"),
        blueprint=dict(required=True),
        name=dict(required=False),
        content=dict(required=False, type="json"),
        state=dict(choices=['present', 'absent'], default='present')
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[('name', 'content')],
        required_one_of=[('name', 'content')],
        supports_check_mode=True
    )
    # Check if aos-pyez is present and match the minimum version
    check_aos_version(module, '0.6.0')
    blueprint_virtnet(module)


if __name__ == '__main__':
    main()
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/network/aos/_aos_blueprint_virtnet.py
|
Python
|
gpl-3.0
| 7,661
|
import base64
from collections import Counter
from datetime import datetime, timedelta
import logging
import re
import urllib.parse
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField, JSONField
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import connection, IntegrityError, models, transaction
from django.db.models import Count, F, Q, Max
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from zentral.conf import settings
from zentral.core.compliance_checks.utils import get_machine_compliance_check_statuses
from zentral.core.incidents.models import MachineIncident, Status
from zentral.utils.model_extras import find_all_related_objects
from zentral.utils.mt_models import AbstractMTObject, prepare_commit_tree, MTObjectManager, MTOError
from .conf import (has_deb_packages,
update_ms_tree_platform, update_ms_tree_type,
PLATFORM_CHOICES, PLATFORM_CHOICES_DICT,
TYPE_CHOICES, TYPE_CHOICES_DICT)
from .exceptions import EnrollmentSecretVerificationFailed
logger = logging.getLogger('zentral.contrib.inventory.models')
class MetaBusinessUnitManager(models.Manager):
    """Custom manager for MetaBusinessUnit lookups."""

    def get_or_create_with_bu_key_and_name(self, key, name):
        # reuse the MBU already linked to a business unit with this key,
        # otherwise create a fresh one carrying the given name
        try:
            return self.filter(businessunit__key=key)[0]
        except IndexError:
            mbu = MetaBusinessUnit(name=name)
            mbu.save()
            return mbu

    def available_for_api_enrollment(self):
        # MBUs that already have an inventory-source business unit
        return self.distinct().filter(businessunit__source__module='zentral.contrib.inventory')
class MetaBusinessUnit(models.Model):
    """The object to link the different BusinessUnits."""
    name = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # adds get_or_create_with_bu_key_and_name() / available_for_api_enrollment()
    objects = MetaBusinessUnitManager()
    def __str__(self):
        return self.name
    class Meta:
        ordering = ('name',)
    def get_absolute_url(self):
        return reverse('inventory:mbu_machines', args=(self.id,))
    def get_current_business_units(self):
        """Current source business units linked to this MBU."""
        # !!! api enrollment business unit excluded !!!
        return BusinessUnit.objects.current().exclude(
            source__module='zentral.contrib.inventory').filter(meta_business_unit=self)
    def api_enrollment_business_units(self):
        """API-enrollment business units of this MBU, newest first."""
        return self.businessunit_set.filter(source__module='zentral.contrib.inventory').order_by('-id')
    def api_enrollment_enabled(self):
        """True if at least one API-enrollment business unit exists."""
        return self.api_enrollment_business_units().count() > 0
    def create_enrollment_business_unit(self):
        """Get or create the inventory-source BU used for API enrollment."""
        reference = "MBU{}".format(self.id)
        b, created = BusinessUnit.objects.commit({'source': {'module': 'zentral.contrib.inventory',
                                                             'name': 'inventory'},
                                                  'reference': reference,
                                                  'name': reference},
                                                 meta_business_unit=self)
        if created:
            b.set_meta_business_unit(self)
        return b
    def tags(self):
        """Tags applied to this MBU: MBU-restricted tags first, then by name."""
        tags = list(mbut.tag for mbut in self.metabusinessunittag_set.select_related('tag'))
        tags.sort(key=lambda t: (t.meta_business_unit is None, str(t).upper()))
        return tags
    def serialize(self):
        return {"name": self.name,
                "pk": self.pk}
    def can_be_deleted(self):
        """True if nothing references this MBU except deletable business units."""
        for related_objects in find_all_related_objects(self):
            if related_objects.objects_count:
                if related_objects.name == "businessunit":
                    # OK to delete if all the business units can be deleted
                    for bu in related_objects.objects:
                        if not bu.can_be_deleted():
                            return False
                    continue
                else:
                    return False
        return True
    def delete(self, *args, **kwargs):
        # refuse to delete an MBU that still has non-deletable related objects
        if not self.can_be_deleted():
            raise ValueError("MBU {} cannot be deleted".format(self.pk))
        for b in self.businessunit_set.all():
            b.delete()
        super().delete(*args, **kwargs)
class SourceManager(MTObjectManager):
    """Queries over the inventory sources currently in use."""
    def current_machine_group_sources(self):
        """Sources contributing at least one machine group to a current snapshot."""
        return (self.filter(machinegroup__machinesnapshot__currentmachinesnapshot__isnull=False)
                .distinct()
                .order_by("module", "name"))
    def current_business_unit_sources(self):
        """Sources contributing at least one business unit to a current snapshot."""
        return (self.filter(businessunit__machinesnapshot__currentmachinesnapshot__isnull=False)
                .distinct()
                .order_by("module", "name"))
    def current_machine_snapshot_sources(self):
        """Sources with at least one current machine snapshot."""
        return (self.filter(currentmachinesnapshot__isnull=False)
                .distinct()
                .order_by("module", "name"))
    def current_macos_apps_sources(self):
        # can get really expensive if there are a lot of machines and apps
        # it is recommended to use current_machine_snapshot_sources() instead
        return (self.filter(machinesnapshot__currentmachinesnapshot__isnull=False,
                            machinesnapshot__osx_app_instances__isnull=False)
                .distinct()
                .order_by("module", "name"))
class Source(AbstractMTObject):
    """An inventory source: a zentral module plus its configured instance."""
    module = models.TextField()
    name = models.TextField()
    config = JSONField(blank=True, null=True)
    objects = SourceManager()
    def __str__(self):
        return self.name
    def get_display_name(self):
        # TODO: better. see also zentral.inventory.utils
        dn = [self.name]
        try:
            dn.append(self.config["host"])
        except (TypeError, KeyError):
            # config may be None (TypeError) or lack a "host" key (KeyError)
            pass
        return "/".join(e for e in dn if e)
class Link(AbstractMTObject):
    """A hyperlink (anchor text + URL) attached to inventory objects."""
    anchor_text = models.TextField()
    url = models.CharField(max_length=200)
class AbstractMachineGroupManager(MTObjectManager):
    def current(self):
        """Groups attached to at least one current machine snapshot."""
        return (self.filter(machinesnapshot__currentmachinesnapshot__isnull=False)
                .distinct()
                .select_related('source')
                .order_by('source__module', 'name'))
class AbstractMachineGroup(AbstractMTObject):
    """Base class for source-defined machine groupings (groups, business units)."""
    source = models.ForeignKey(Source, on_delete=models.PROTECT)
    reference = models.TextField()
    # derived hash over (source minus name, reference); recomputed on save
    key = models.CharField(max_length=40, db_index=True)
    name = models.TextField()
    links = models.ManyToManyField(Link)
    objects = AbstractMachineGroupManager()
    # key is derived, so it is excluded from the mt hash computation
    mt_excluded_fields = ('key',)
    class Meta:
        abstract = True
    def generate_key(self):
        """Return the mt hash identifying this group."""
        source_dict = self.source.serialize()
        # the source name does not affect group identity
        source_dict.pop('name')
        data = {'source': source_dict,
                'reference': self.reference}
        prepare_commit_tree(data)
        return data['mt_hash']
    def save(self, *args, **kwargs):
        self.key = self.generate_key()
        # NOTE(review): *args/**kwargs are not forwarded to super().save() —
        # looks unintentional; confirm before relying on save(using=...) etc.
        super(AbstractMachineGroup, self).save()
    def get_short_key(self):
        # first 8 hex chars, for display
        return self.key[:8]
class BusinessUnit(AbstractMachineGroup):
    """A source-defined business unit, always linked to a MetaBusinessUnit."""
    meta_business_unit = models.ForeignKey(MetaBusinessUnit, on_delete=models.PROTECT)
    # meta_business_unit is a local link, not part of the mt hash
    mt_excluded_fields = ('key', 'meta_business_unit')
    def __str__(self):
        if self.is_api_enrollment_business_unit():
            return "{} API enrollment".format(self.meta_business_unit.name)
        else:
            return self.name
    def save(self, *args, **kwargs):
        self.key = self.generate_key()
        # get or create the corresponding MetaBusinessUnit
        # there must always be a MetaBusinessUnit for every BusinessUnit in the inventory
        # MetaBusinessUnits can be edited in the UI, not the BusinessUnits directly
        # Many BusinessUnits can be linked to a single MetaBusinessUnit to show that they are equivalent.
        mbu = kwargs.get('meta_business_unit', None)
        if not mbu:
            mbu = MetaBusinessUnit.objects.get_or_create_with_bu_key_and_name(self.key, self.name)
        self.meta_business_unit = mbu
        super(BusinessUnit, self).save(*args, **kwargs)
    def set_meta_business_unit(self, mbu):
        """Relink this BU to another MBU without recomputing the key."""
        self.meta_business_unit = mbu
        super(BusinessUnit, self).save()
    def is_api_enrollment_business_unit(self):
        # API enrollment BUs are the ones created by the inventory module itself
        return self.source.module == "zentral.contrib.inventory"
    def get_name_display(self):
        if self.is_api_enrollment_business_unit():
            return "{} - API enrollment".format(self.meta_business_unit)
        else:
            return self.name
    def can_be_detached(self):
        """True if this BU can be moved out to its own fresh MBU."""
        return (not self.is_api_enrollment_business_unit() and
                self.meta_business_unit.get_current_business_units().count() > 1)
    def detach(self):
        """Move this BU to a new MBU named after it; return the (new or old) MBU."""
        if not self.can_be_detached():
            return self.meta_business_unit
        new_mbu = MetaBusinessUnit.objects.create(name=self.name)
        self.set_meta_business_unit(new_mbu)
        return new_mbu
    def can_be_deleted(self):
        # deletable only when no machine snapshot references it
        return not self.machinesnapshot_set.count()
class MachineGroup(AbstractMachineGroup):
    """A source-defined machine group."""
    machine_links = models.ManyToManyField(Link, related_name="+") # tmpl for links to machine in a group
class OSVersion(AbstractMTObject):
    """An operating system version: optional name, version numbers, build."""
    name = models.TextField(blank=True, null=True)
    major = models.PositiveIntegerField()
    minor = models.PositiveIntegerField(blank=True, null=True)
    patch = models.PositiveIntegerField(blank=True, null=True)
    build = models.TextField(blank=True, null=True)

    def __str__(self):
        # "<name> <major.minor.patch> (<build>)" with empty parts omitted
        parts = []
        if self.name:
            parts.append(self.name)
        parts.append(self.get_number_display())
        if self.build:
            parts.append("({})".format(self.build))
        return " ".join(parts)

    def get_number_display(self):
        """Dotted version number, skipping unset components."""
        numbers = (self.major, self.minor, self.patch)
        return ".".join(str(n) for n in numbers if n is not None)
class SystemInfo(AbstractMTObject):
    """Hardware and system identity attributes of a machine snapshot."""
    computer_name = models.TextField(blank=True, null=True)
    hostname = models.TextField(blank=True, null=True)
    hardware_model = models.TextField(blank=True, null=True)
    hardware_serial = models.TextField(blank=True, null=True)
    # CPU description
    cpu_type = models.TextField(blank=True, null=True)
    cpu_subtype = models.TextField(blank=True, null=True)
    cpu_brand = models.TextField(blank=True, null=True)
    cpu_physical_cores = models.PositiveIntegerField(blank=True, null=True)
    cpu_logical_cores = models.PositiveIntegerField(blank=True, null=True)
    # physical memory — presumably bytes; confirm against the inventory sources
    physical_memory = models.BigIntegerField(blank=True, null=True)
class Disk(AbstractMTObject):
    """A disk attached to a machine."""
    name = models.TextField()
    # size — presumably bytes; confirm against the inventory sources
    size = models.BigIntegerField()
class NetworkInterface(AbstractMTObject):
    """A configured network interface of a machine snapshot."""
    interface = models.TextField(blank=False, null=False)
    mac = models.CharField(max_length=23, blank=False, null=False) # 48 or 64 bit with separators
    address = models.GenericIPAddressField(blank=False, null=False, unpack_ipv4=True)
    mask = models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True)
    broadcast = models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True)
    class Meta:
        ordering = ('interface',)
    def get_mac_organization(self):
        """Return the organization owning this MAC's address block, or None."""
        mac = self.mac.replace(":", "").upper()
        # candidate hex prefixes of decreasing specificity (9, 7, 6 chars)
        assignments = [mac[:length] for length in (9, 7, 6)]
        found_assignments = list(MACAddressBlockAssignment.objects.select_related("organization")
                                 .filter(assignment__in=assignments))
        if not found_assignments:
            return None
        # prefer the most specific (longest) matching assignment
        found_assignments.sort(key=lambda a: len(a.assignment), reverse=True)
        return found_assignments[0].organization
class OSXAppManager(MTObjectManager):
    def current(self):
        """Apps with at least one instance in a current machine snapshot."""
        return self.distinct().filter(osxappinstance__machinesnapshot__currentmachinesnapshot__isnull=False)
class OSXApp(AbstractMTObject):
    """A macOS application, identified by its bundle id/name/version."""
    bundle_id = models.TextField(db_index=True, blank=True, null=True)
    bundle_name = models.TextField(db_index=True, blank=True, null=True)
    bundle_display_name = models.TextField(blank=True, null=True)
    bundle_version = models.TextField(blank=True, null=True)
    bundle_version_str = models.TextField(blank=True, null=True)
    objects = OSXAppManager()
    def __str__(self):
        # "<display or bundle name> <version str>", empty parts omitted
        return " ".join(s for s in (self.bundle_display_name or self.bundle_name, self.bundle_version_str) if s)
    def sources(self):
        """Inventory sources reporting at least one instance of this app."""
        return (Source.objects.distinct()
                .filter(machinesnapshot__osx_app_instances__app=self)
                .order_by('module', 'name'))
    def get_sources_for_display(self):
        """Space-separated source names, for templates."""
        return " ".join(s.name for s in self.sources())
    def current_instances(self):
        """Instances of this app in current snapshots, with machine counts."""
        return (self.osxappinstance_set.filter(machinesnapshot__currentmachinesnapshot__isnull=False)
                .annotate(machinesnapshot_num=Count('machinesnapshot')))
    def all_names(self):
        """Yield the distinct display/bundle names, display name first."""
        if self.bundle_display_name:
            yield self.bundle_display_name
        if self.bundle_name and self.bundle_name != self.bundle_display_name:
            yield self.bundle_name
    def all_versions(self):
        """Yield the distinct version strings, human-readable one first."""
        if self.bundle_version_str:
            yield self.bundle_version_str
        if self.bundle_version and self.bundle_version != self.bundle_version_str:
            yield self.bundle_version
class Certificate(AbstractMTObject):
    """An X.509 certificate, optionally linked to its issuer via signed_by."""
    common_name = models.TextField(blank=True, null=True)
    organization = models.TextField(blank=True, null=True)
    organizational_unit = models.TextField(blank=True, null=True)
    domain = models.TextField(blank=True, null=True)
    sha_1 = models.CharField(max_length=40, blank=True, null=True)
    sha_256 = models.CharField(max_length=64, blank=True, null=True, db_index=True)
    valid_from = models.DateTimeField(blank=True, null=True)
    valid_until = models.DateTimeField(blank=True, null=True)
    # issuer certificate; kept (SET_NULL) if the issuer is deleted
    signed_by = models.ForeignKey('self', on_delete=models.SET_NULL, blank=True, null=True)
class OSXAppInstance(AbstractMTObject):
    """A concrete installation of an OSXApp on a machine (path, hashes, signature)."""
    app = models.ForeignKey(OSXApp, on_delete=models.PROTECT)
    bundle_path = models.TextField(blank=True, null=True)
    path = models.TextField(blank=True, null=True)
    sha_1 = models.CharField(max_length=40, blank=True, null=True)
    sha_256 = models.CharField(max_length=64, db_index=True, blank=True, null=True)
    type = models.TextField(blank=True, null=True)
    signed_by = models.ForeignKey(Certificate, on_delete=models.PROTECT, blank=True, null=True)
    def certificate_chain(self):
        """Return the signing certificates, from the instance signer up to the root.

        NOTE(review): assumes the signed_by links are acyclic — a cycle would
        make this loop forever. Confirm against the certificate importers.
        """
        chain = []
        obj = self
        while obj.signed_by:
            chain.append(obj.signed_by)
            obj = obj.signed_by
        return chain
class DebPackage(AbstractMTObject):
    """A Debian package installed on a machine (dpkg metadata)."""
    name = models.TextField(blank=True, null=True)
    version = models.TextField(blank=True, null=True)
    source = models.TextField(blank=True, null=True)
    size = models.BigIntegerField(blank=True, null=True)
    arch = models.TextField(blank=True, null=True)
    revision = models.TextField(blank=True, null=True)
    status = models.TextField(blank=True, null=True)
    maintainer = models.TextField(blank=True, null=True)
    section = models.TextField(blank=True, null=True)
    priority = models.TextField(blank=True, null=True)
class Program(AbstractMTObject):
    """An installed program (Windows-style program metadata)."""
    name = models.TextField(blank=True, null=True)
    version = models.TextField(blank=True, null=True)
    language = models.TextField(blank=True, null=True)
    publisher = models.TextField(blank=True, null=True)
    identifying_number = models.TextField(blank=True, null=True)
class ProgramInstance(AbstractMTObject):
    """A concrete installation of a Program on a machine."""
    program = models.ForeignKey(Program, on_delete=models.PROTECT)
    install_location = models.TextField(blank=True, null=True)
    install_source = models.TextField(blank=True, null=True)
    uninstall_string = models.TextField(blank=True, null=True)
    install_date = models.DateTimeField(blank=True, null=True)
# see https://developers.google.com/android/management/reference/rest/v1/enterprises.devices#ApplicationReport
class AndroidApp(AbstractMTObject):
    """An Android application report (see Android Management API ApplicationReport)."""
    display_name = models.TextField()
    version_name = models.TextField()
    version_code = models.IntegerField(blank=True, null=True)
    package_name = models.TextField(blank=True, null=True)
    installer_package_name = models.TextField(blank=True, null=True)
# see https://developer.apple.com/documentation/devicemanagement/installedapplicationlistresponse/installedapplicationlistitem # NOQA
class IOSApp(AbstractMTObject):
    """An iOS application (see Apple MDM InstalledApplicationListItem)."""
    name = models.TextField()
    version = models.TextField()
    ad_hoc_signed = models.BooleanField(blank=True, null=True)
    app_store_vendable = models.BooleanField(blank=True, null=True)
    beta_app = models.BooleanField(blank=True, null=True)
    bundle_size = models.BigIntegerField(blank=True, null=True)
    device_based_vpp = models.BooleanField(blank=True, null=True)
    identifier = models.TextField(blank=True, null=True)
    is_validated = models.BooleanField(blank=True, null=True)
    short_version = models.TextField(blank=True, null=True)
class TeamViewer(AbstractMTObject):
    """TeamViewer remote access client information for a machine."""
    teamviewer_id = models.TextField(blank=False, null=False)
    release = models.TextField(blank=True, null=True)
    # BooleanField(null=True) replaces the deprecated NullBooleanField
    # (removed in Django 4.0); same nullable-boolean style as the IOSApp fields.
    unattended = models.BooleanField(blank=True, null=True)
class PuppetTrustedFacts(AbstractMTObject):
    """Puppet trusted facts (certname, authentication level, extensions)."""
    # how the node was authenticated by the puppet master
    authenticated = models.CharField(max_length=16,
                                     choices=(('remote', 'remote'),
                                              ('local', 'local'),
                                              ('false', 'false')))
    extensions = JSONField(blank=True, null=True)
    certname = models.TextField()
class PuppetCoreFacts(AbstractMTObject):
    """Puppet core facts about the agent installation."""
    aio_agent_version = models.TextField(blank=True, null=True)
    augeas_version = models.TextField(blank=True, null=True)
    client_version = models.TextField(blank=True, null=True)
    facter_version = models.TextField(blank=True, null=True)
    ruby_sitedir = models.TextField(blank=True, null=True)
    ruby_version = models.TextField(blank=True, null=True)
    ruby_platform = models.TextField(blank=True, null=True)
class PuppetNode(AbstractMTObject):
    """A Puppet node: environment plus its trusted, core and extra facts."""
    environment = models.TextField()
    trusted_facts = models.ForeignKey(PuppetTrustedFacts, on_delete=models.PROTECT, blank=True, null=True)
    core_facts = models.ForeignKey(PuppetCoreFacts, on_delete=models.PROTECT, blank=True, null=True)
    extra_facts = JSONField(blank=True, null=True)
class PrincipalUserSource(AbstractMTObject):
    """The origin of a principal user record (which system reported it)."""
    COMPANY_PORTAL = "COMPANY_PORTAL"
    GOOGLE_CHROME = "GOOGLE_CHROME"
    INVENTORY = "INVENTORY"
    LOGGED_IN_USER = "LOGGED_IN_USER"
    SANTA_MACHINE_OWNER = "SANTA_MACHINE_OWNER"
    TYPE_CHOICES = (
        (COMPANY_PORTAL, "Company portal"),
        (GOOGLE_CHROME, "Google Chrome"),
        (INVENTORY, "Inventory"),
        (LOGGED_IN_USER, "Logged-in user"),
        (SANTA_MACHINE_OWNER, "Santa machine owner"),
    )
    type = models.CharField(choices=TYPE_CHOICES, max_length=64)
    # source-specific extra information
    properties = JSONField(blank=True, null=True)
class PrincipalUser(AbstractMTObject):
    """The main user of a machine, as reported by a PrincipalUserSource."""
    source = models.ForeignKey(PrincipalUserSource, on_delete=models.PROTECT)
    unique_id = models.TextField(db_index=True)
    principal_name = models.TextField(db_index=True)
    display_name = models.TextField(blank=True, null=True)
class Payload(AbstractMTObject):
    """A configuration profile payload (one item inside a Profile)."""
    uuid = models.TextField()
    identifier = models.TextField(blank=True, null=True)
    display_name = models.TextField(blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    type = models.TextField(blank=True, null=True)
class Profile(AbstractMTObject):
    """An installed configuration profile and its payloads."""
    uuid = models.TextField(db_index=True)
    identifier = models.TextField(blank=True, null=True)
    display_name = models.TextField(blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    organization = models.TextField(blank=True, null=True)
    removal_disallowed = models.BooleanField(blank=True, null=True)
    verified = models.BooleanField(blank=True, null=True)
    install_date = models.DateTimeField(blank=True, null=True)
    payloads = models.ManyToManyField(Payload)
class MachineSnapshotManager(MTObjectManager):
    """Manager adding queries scoped to the current machine snapshots."""
    def current(self):
        """Return the current snapshots with their main relations preloaded."""
        qs = self.select_related('business_unit__meta_business_unit',
                                 'os_version',
                                 'system_info',
                                 'teamviewer',
                                 'puppet_node')
        return qs.filter(currentmachinesnapshot__isnull=False)
    def current_platforms(self):
        """Sorted (value, label) pairs for the platforms present in current snapshots."""
        qs = (self.filter(platform__isnull=False, currentmachinesnapshot__isnull=False)
                  .values("platform").distinct())
        choices = [(rd["platform"], PLATFORM_CHOICES_DICT[rd["platform"]]) for rd in qs]
        choices.sort()
        return choices
    def current_types(self):
        """Sorted (value, label) pairs for the machine types present in current snapshots."""
        qs = (self.filter(type__isnull=False, currentmachinesnapshot__isnull=False)
                  .values("type").distinct())
        choices = [(rd["type"], TYPE_CHOICES_DICT[rd["type"]]) for rd in qs]
        choices.sort()
        return choices
class MachineSnapshot(AbstractMTObject):
    """A snapshot of a machine's inventory state as seen by one source.

    Snapshots are linked to their history via MachineSnapshotCommit and to
    the latest state of a machine via CurrentMachineSnapshot.
    """
    source = models.ForeignKey(Source, on_delete=models.PROTECT)
    reference = models.TextField(blank=True, null=True, db_index=True)
    serial_number = models.TextField(db_index=True)
    imei = models.CharField(max_length=18, blank=True, null=True)
    meid = models.CharField(max_length=18, blank=True, null=True)
    links = models.ManyToManyField(Link)
    business_unit = models.ForeignKey(BusinessUnit, on_delete=models.PROTECT, blank=True, null=True)
    groups = models.ManyToManyField(MachineGroup)
    os_version = models.ForeignKey(OSVersion, on_delete=models.PROTECT, blank=True, null=True)
    platform = models.CharField(max_length=32, blank=True, null=True, choices=PLATFORM_CHOICES)
    system_info = models.ForeignKey(SystemInfo, on_delete=models.PROTECT, blank=True, null=True)
    type = models.CharField(max_length=32, blank=True, null=True, choices=TYPE_CHOICES)
    disks = models.ManyToManyField(Disk)
    network_interfaces = models.ManyToManyField(NetworkInterface)
    android_apps = models.ManyToManyField(AndroidApp)
    deb_packages = models.ManyToManyField(DebPackage)
    ios_apps = models.ManyToManyField(IOSApp)
    osx_app_instances = models.ManyToManyField(OSXAppInstance)
    program_instances = models.ManyToManyField(ProgramInstance)
    profiles = models.ManyToManyField(Profile)
    teamviewer = models.ForeignKey(TeamViewer, on_delete=models.PROTECT, blank=True, null=True)
    puppet_node = models.ForeignKey(PuppetNode, on_delete=models.PROTECT, blank=True, null=True)
    principal_user = models.ForeignKey(PrincipalUser, on_delete=models.PROTECT, blank=True, null=True)
    certificates = models.ManyToManyField(Certificate)
    public_ip_address = models.GenericIPAddressField(blank=True, null=True, unpack_ipv4=True)
    extra_facts = JSONField(blank=True, null=True)
    objects = MachineSnapshotManager()
    def get_machine_str(self):
        """Best display name for the machine: computer name, hostname, or serial number."""
        if self.system_info and (self.system_info.computer_name or self.system_info.hostname):
            return self.system_info.computer_name or self.system_info.hostname
        else:
            return self.serial_number
    def groups_with_links(self):
        """Yield (group, [(url, anchor_text), ...]) for each machine group.

        Machine link URLs may contain a %MACHINE_SNAPSHOT_REFERENCE% placeholder
        which is substituted with this snapshot's reference.
        """
        for group in self.groups.prefetch_related('links', 'machine_links').all():
            ll = []
            for link in group.links.all():
                ll.append((link.url, link.anchor_text))
            for link in group.machine_links.all():
                url = link.url
                url = url.replace('%MACHINE_SNAPSHOT_REFERENCE%', self.reference)
                ll.append((url, link.anchor_text))
            yield group, ll
    def ordered_android_apps(self):
        """Android apps ordered for display."""
        return self.android_apps.all().order_by('display_name', 'version_name', 'pk')
    def ordered_deb_packages(self):
        """Debian packages ordered for display."""
        return self.deb_packages.all().order_by('name', 'version', 'pk')
    def ordered_ios_apps(self):
        """iOS apps ordered for display."""
        return self.ios_apps.all().order_by('name', 'version', 'pk')
    def ordered_osx_app_instances(self):
        """macOS app instances (with their app preloaded) ordered for display."""
        return self.osx_app_instances.select_related('app').all().order_by('app__bundle_name',
                                                                           'app__bundle_version_str',
                                                                           'bundle_path')
    def ordered_program_instances(self):
        """Program instances (with their program preloaded) ordered for display."""
        return self.program_instances.select_related('program').all().order_by('program__name',
                                                                               'program__version',
                                                                               'install_location')
    def ordered_profiles(self):
        """Profiles ordered for display."""
        return self.profiles.all().order_by('identifier', 'uuid', 'pk')
    @cached_property
    def last_commit(self):
        """The most recent MachineSnapshotCommit for this snapshot, or None."""
        try:
            return self.machinesnapshotcommit_set.all().order_by("-id")[0]
        except IndexError:
            pass
class MachineSnapshotCommitManager(models.Manager):
    def commit_machine_snapshot_tree(self, tree):
        """Commit a machine snapshot tree and record its history.

        Pops 'last_seen' (defaulting to now, normalized to a naive UTC
        datetime) and 'system_uptime' from the tree, normalizes the platform
        and type, commits the tree as a (deduplicated) MachineSnapshot, and,
        if anything changed since the last commit for this (serial number,
        source), creates a new MachineSnapshotCommit version and updates the
        CurrentMachineSnapshot.

        Returns a (new commit or None, machine snapshot, last seen) tuple.

        Raises MTOError if a concurrent commit raced us with a *different*
        snapshot for the same version.
        """
        last_seen = tree.pop('last_seen', None)
        if not last_seen:
            # NOTE(review): naive UTC datetime (utcnow) — consistent with the
            # make_naive() below, but confirm the DB stores naive UTC.
            last_seen = datetime.utcnow()
        if timezone.is_aware(last_seen):
            last_seen = timezone.make_naive(last_seen)
        system_uptime = tree.pop('system_uptime', None)
        update_ms_tree_platform(tree)
        update_ms_tree_type(tree)
        machine_snapshot, _ = MachineSnapshot.objects.commit(tree)
        serial_number = machine_snapshot.serial_number
        source = machine_snapshot.source
        new_version = new_parent = None
        try:
            with transaction.atomic():
                try:
                    msc = MachineSnapshotCommit.objects.filter(serial_number=serial_number,
                                                               source=source).order_by('-version')[0]
                except IndexError:
                    # first commit for this (serial number, source)
                    new_version = 1
                else:
                    # only create a new version if something actually changed
                    if msc.machine_snapshot != machine_snapshot \
                       or msc.last_seen != last_seen \
                       or msc.system_uptime != system_uptime:
                        new_version = msc.version + 1
                        new_parent = msc
                new_msc = None
                if new_version:
                    new_msc = MachineSnapshotCommit.objects.create(serial_number=serial_number,
                                                                   source=source,
                                                                   version=new_version,
                                                                   machine_snapshot=machine_snapshot,
                                                                   parent=new_parent,
                                                                   last_seen=last_seen,
                                                                   system_uptime=system_uptime)
                CurrentMachineSnapshot.objects.update_or_create(serial_number=serial_number,
                                                                source=source,
                                                                defaults={'machine_snapshot': machine_snapshot,
                                                                          'last_seen': last_seen})
                return new_msc, machine_snapshot, last_seen
        except IntegrityError:
            # unique (serial_number, source, version) violated: a concurrent
            # commit won the race. Tolerate it only if it stored the same snapshot.
            msc = MachineSnapshotCommit.objects.get(serial_number=serial_number,
                                                    source=source,
                                                    version=new_version)
            if msc.machine_snapshot == machine_snapshot:
                logger.warning("MachineSnapshotCommit race with same snapshot for "
                               "source {} and serial_number {}".format(source, serial_number))
                return None, machine_snapshot, msc.last_seen
            else:
                raise MTOError("MachineSnapshotCommit race for "
                               "source {} and serial_number {}".format(source, serial_number))
class MachineSnapshotCommit(models.Model):
    """One version in the snapshot history of a (serial number, source) pair."""
    serial_number = models.TextField(db_index=True)
    source = models.ForeignKey(Source, on_delete=models.CASCADE)
    version = models.PositiveIntegerField(default=1)
    machine_snapshot = models.ForeignKey(MachineSnapshot, on_delete=models.CASCADE)
    # previous commit; kept (SET_NULL) when the parent is deleted
    parent = models.ForeignKey('self', on_delete=models.SET_NULL, blank=True, null=True)
    last_seen = models.DateTimeField(blank=True, null=True)
    system_uptime = models.PositiveIntegerField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    objects = MachineSnapshotCommitManager()
    class Meta:
        unique_together = ('serial_number', 'source', 'version')
    def update_diff(self):
        """Return the diff against the parent commit (plus last_seen changes), or None if no parent."""
        if not self.parent:
            return None
        else:
            diff = self.machine_snapshot.diff(self.parent.machine_snapshot)
            if self.parent.last_seen and self.parent.last_seen != self.last_seen:
                diff["last_seen"] = {"removed": self.parent.last_seen}
            if self.last_seen and self.parent.last_seen != self.last_seen:
                diff.setdefault("last_seen", {})["added"] = self.last_seen
            return diff
    def get_system_update_for_display(self):
        """Human readable system uptime (e.g. '1 day, 2:03'), or None if unknown."""
        if self.system_uptime:
            # strip leading/trailing ':', '0', ' ' and ',' for a compact display
            return str(timedelta(seconds=self.system_uptime)).strip(":0 ,")
class CurrentMachineSnapshot(models.Model):
    """The latest snapshot of a machine for each source (one row per pair)."""
    serial_number = models.TextField(db_index=True)
    source = models.ForeignKey(Source, on_delete=models.CASCADE)
    machine_snapshot = models.ForeignKey(MachineSnapshot, on_delete=models.CASCADE)
    last_seen = models.DateTimeField()
    class Meta:
        unique_together = ('serial_number', 'source')
class Taxonomy(models.Model):
    """A bag of tags, can be restricted to a MBU"""
    meta_business_unit = models.ForeignKey(MetaBusinessUnit, on_delete=models.CASCADE, blank=True, null=True)
    name = models.CharField(max_length=256, unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        if self.meta_business_unit:
            return "{}/{}".format(self.meta_business_unit, self.name)
        else:
            return self.name
    class Meta:
        ordering = ("meta_business_unit__name", "name")
    def links(self):
        """Return (label, url) pairs describing the objects related to this taxonomy.

        Known related models get a human label (and optionally a url); other
        related models fall back to '<count> <relation name>' with no url.
        """
        known_models = {
            Tag: ("tag", "tags", None)  # TODO: filter?
        }
        link_list = []
        for related_objects in find_all_related_objects(self):
            # the MBU relation is ownership, not usage: skip it
            if related_objects.name == "meta_business_unit":
                continue
            if related_objects.objects_count:
                if related_objects.to_model in known_models:
                    label, label_plural, url = known_models[related_objects.to_model]
                    link_list.append(("{} {}".format(related_objects.objects_count,
                                                     label if related_objects.objects_count == 1 else label_plural),
                                      url))
                else:
                    link_list.append(("{} {}".format(related_objects.objects_count,
                                                     related_objects.name),
                                      None))
        return link_list
class TagManager(models.Manager):
    """Manager with tag queries scoped to business units and inventory usage."""
    def available_for_meta_business_unit(self, meta_business_unit):
        """Return the tags usable for this MBU: its own tags plus the global ones."""
        return self.filter(Q(meta_business_unit=meta_business_unit) | Q(meta_business_unit__isnull=True))
    def used_in_inventory(self):
        """Yield (tag, machine count) for the tags attached to current machines.

        Counts distinct serial numbers tagged either directly (MachineTag) or
        indirectly via a tagged meta business unit.
        """
        query = """
        select tag_id, count(*) from (
            select mt.tag_id, cms.serial_number
            from inventory_machinetag as mt
            join inventory_currentmachinesnapshot as cms on (mt.serial_number = cms.serial_number)
            union
            select mbut.tag_id, cms.serial_number
            from inventory_metabusinessunittag as mbut
            join inventory_businessunit as bu on mbut.meta_business_unit_id = bu.meta_business_unit_id
            join inventory_machinesnapshot as ms on (ms.business_unit_id = bu.id)
            join inventory_currentmachinesnapshot as cms on (cms.machine_snapshot_id = ms.id)
        ) as tag_serial_numbers
        group by tag_id;"""
        # context-managed cursor so it is always closed — same pattern as
        # MetaMachine.has_recent_source_snapshot
        with connection.cursor() as cursor:
            cursor.execute(query)
            counts = {t[0]: t[1] for t in cursor.fetchall()}
        for tag in self.filter(pk__in=counts.keys()):
            yield tag, counts[tag.id]
def validate_color(value):
    """Validate that *value* is a 6-digit hexadecimal color (without '#').

    Raises:
        ValidationError: if *value* is not exactly six hex digits.
    """
    # re.fullmatch instead of re.match(r'^...$'): '$' also matches just before
    # a trailing newline, so e.g. 'abc123\n' would incorrectly pass validation.
    if not re.fullmatch(r'[0-9a-fA-F]{6}', value):
        raise ValidationError(
            _('%(value)s is not a valid color.'),
            params={'value': value},
        )
class Tag(models.Model):
    """A machine tag, optionally scoped to a taxonomy and/or a meta business unit."""
    taxonomy = models.ForeignKey(Taxonomy, on_delete=models.CASCADE, blank=True, null=True)
    meta_business_unit = models.ForeignKey(MetaBusinessUnit, on_delete=models.CASCADE, blank=True, null=True)
    name = models.CharField(max_length=50, unique=True)
    # derived from name in save(); not editable
    slug = models.SlugField(unique=True, editable=False)
    color = models.CharField(max_length=6,
                             default="0079bf",  # blue from UpdateTagView
                             validators=[validate_color])
    objects = TagManager()
    def __str__(self):
        if self.taxonomy:
            return "{}: {}".format(self.taxonomy, self.name)
        if self.meta_business_unit:
            return "{}/{}".format(self.meta_business_unit, self.name)
        else:
            return self.name
    class Meta:
        ordering = ("meta_business_unit__name", "name")
    def save(self, *args, **kwargs):
        # keep the slug in sync with the name
        self.slug = slugify(self.name)
        super(Tag, self).save(*args, **kwargs)
    def links(self):
        """Return (label, url) pairs describing the objects related to this tag.

        Known related models get a human label and, when available, a filtered
        inventory url; other related models fall back to '<count> <relation name>'.
        """
        known_models = {
            EnrollmentSecret: ("enrollment secret", "enrollment secrets", None),
            MachineTag: ("machine", "machines",
                         "{}?tag={}".format(reverse("inventory:index"), self.id)),
            MetaBusinessUnitTag: ("business unit", "business units",
                                  "{}?tag={}".format(reverse("inventory:mbu"), self.id))
        }
        link_list = []
        for related_objects in find_all_related_objects(self):
            # ownership relations, not usage: skip them
            if related_objects.name in ("taxonomy", "meta_business_unit"):
                continue
            if related_objects.objects_count:
                if related_objects.to_model in known_models:
                    label, label_plural, url = known_models[related_objects.to_model]
                    link_list.append(("{} {}".format(related_objects.objects_count,
                                                     label if related_objects.objects_count == 1 else label_plural),
                                      url))
                else:
                    link_list.append(("{} {}".format(related_objects.objects_count,
                                                     related_objects.name),
                                      None))
        return link_list
class MachineTag(models.Model):
    """A tag attached directly to a machine (by serial number)."""
    serial_number = models.TextField()
    tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
    class Meta:
        unique_together = (('serial_number', 'tag'),)
class MetaBusinessUnitTag(models.Model):
    """A tag attached to a meta business unit (inherited by its machines)."""
    meta_business_unit = models.ForeignKey(MetaBusinessUnit, on_delete=models.CASCADE)
    tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
class MetaMachine:
    """Simplified access to the ms.

    Aggregated, read-only view of one machine (by serial number) across all
    its current machine snapshots.
    """
    def __init__(self, serial_number, snapshots=None):
        # NOTE(review): the snapshots argument is accepted but never used —
        # the snapshots cached_property below always queries the DB. Confirm intent.
        self.serial_number = serial_number
    @cached_property
    def _event_serialization_options(self):
        # per-app configuration of the machine info included in the events
        return settings["apps"]["zentral.contrib.inventory"].get("event_serialization", {})
    def _include_groups_in_serialized_info_for_event(self):
        return self._event_serialization_options.get("include_groups", True)
    def _include_principal_user_in_serialized_info_for_event(self):
        return self._event_serialization_options.get("include_principal_user", True)
    @classmethod
    def from_urlsafe_serial_number(cls, urlsafe_serial_number):
        """Build a MetaMachine from a URL-safe serial number (see make_urlsafe_serial_number)."""
        if urlsafe_serial_number.startswith("."):
            # leading '.' marks a base64-url encoded serial number
            urlsafe_serial_number = urlsafe_serial_number[1:].encode("utf-8")
            # restore the stripped base64 '=' padding
            urlsafe_serial_number += -len(urlsafe_serial_number) % 4 * b"="
            serial_number = base64.urlsafe_b64decode(urlsafe_serial_number).decode("utf-8")
        else:
            serial_number = urlsafe_serial_number
        return cls(serial_number)
    @staticmethod
    def make_urlsafe_serial_number(serial_number):
        """Return a representation of the serial number safe to embed in a URL.

        Serial numbers that are not URL-safe (or that start with '.') are
        base64-url encoded with the '=' padding stripped, and prefixed with '.'.
        """
        if serial_number.startswith(".") or \
           urllib.parse.quote(serial_number, safe="") != serial_number:
            return ".{}".format(
                base64.urlsafe_b64encode(serial_number.encode("utf-8")).decode("utf-8").rstrip("=")
            )
        else:
            return serial_number
    def get_urlsafe_serial_number(self):
        return self.make_urlsafe_serial_number(self.serial_number)
    def get_absolute_url(self):
        return reverse('inventory:machine', args=(self.get_urlsafe_serial_number(),))
    @cached_property
    def snapshots(self):
        """All the current machine snapshots for this serial number."""
        return list(MachineSnapshot.objects.current().filter(serial_number=self.serial_number))
    @cached_property
    def computer_name(self):
        # first computer name found in the snapshots, None if none
        for ms in self.snapshots:
            if ms.system_info and ms.system_info.computer_name:
                return ms.system_info.computer_name
    def get_snapshots_sources_for_display(self):
        """Snapshot sources sorted by name, for display."""
        return sorted((s.source for s in self.snapshots), key=lambda s: s.name)
    def get_url(self):
        """Absolute machine URL (including the configured TLS hostname), or None."""
        try:
            tls_hostname = settings['api']['tls_hostname']
        except KeyError:
            logger.warning("Missing api.tls_hostname configuration key")
        else:
            return "{}{}".format(tls_hostname.rstrip('/'), self.get_absolute_url())
    @property
    def names_with_sources(self):
        """{machine name: [source names]} over all the snapshots."""
        names = {}
        for ms in self.snapshots:
            names.setdefault(ms.get_machine_str(), []).append(ms.source.name)
        return names
    # Meta? Business units
    def business_units(self, include_api_enrollment_business_unit=False):
        """The snapshot business units, excluding the API enrollment ones by default."""
        bu_l = []
        for ms in self.snapshots:
            if (ms.business_unit and
                (include_api_enrollment_business_unit or
                 not ms.business_unit.is_api_enrollment_business_unit())):
                bu_l.append(ms.business_unit)
        return bu_l
    @cached_property
    def meta_business_units(self):
        return set(bu.meta_business_unit for bu in self.business_units(include_api_enrollment_business_unit=True))
    @cached_property
    def meta_business_unit_id_set(self):
        return set(mbu.id for mbu in self.meta_business_units)
    @cached_property
    def platform(self):
        # most common platform reported by the snapshots, None if none
        c = Counter(ms.platform for ms in self.snapshots if ms.platform)
        try:
            return c.most_common(1)[0][0]
        except IndexError:
            pass
    @cached_property
    def type(self):
        # most common machine type reported by the snapshots, None if none
        c = Counter(ms.type for ms in self.snapshots if ms.type)
        try:
            return c.most_common(1)[0][0]
        except IndexError:
            pass
    @cached_property
    def has_deb_packages(self):
        return any(has_deb_packages(ms) for ms in self.snapshots)
    # Filtered snapshots
    def snapshots_with_android_apps(self):
        return list(ms for ms in self.snapshots if ms.android_apps.count())
    def snapshots_with_deb_packages(self):
        return list(ms for ms in self.snapshots if ms.deb_packages.count())
    def snapshots_with_ios_apps(self):
        return list(ms for ms in self.snapshots if ms.ios_apps.count())
    def snapshots_with_osx_app_instances(self):
        return list(ms for ms in self.snapshots if ms.osx_app_instances.count())
    def snapshots_with_profiles(self):
        return list(ms for ms in self.snapshots if ms.profiles.count())
    def snapshots_with_program_instances(self):
        return list(ms for ms in self.snapshots if ms.program_instances.count())
    # Inventory tags
    @cached_property
    def tags_with_types(self):
        """Sorted ('machine'|'meta_business_unit', tag) pairs for all the machine tags."""
        tags = [('machine', mt.tag)
                for mt in (MachineTag.objects.select_related('tag',
                                                             'tag__meta_business_unit',
                                                             'tag__taxonomy',
                                                             'tag__taxonomy__meta_business_unit')
                           .filter(serial_number=self.serial_number))]
        tags.extend(('meta_business_unit', mbut.tag)
                    for mbut in (MetaBusinessUnitTag.objects.select_related('tag',
                                                                            'tag__meta_business_unit',
                                                                            'tag__taxonomy',
                                                                            'tag__taxonomy__meta_business_unit')
                                 .filter(meta_business_unit__in=self.meta_business_units)))
        # MBU-less (global) tags last, then case-insensitive by name
        tags.sort(key=lambda t: (t[1].meta_business_unit is None, str(t[1]).upper()))
        return tags
    @cached_property
    def tags(self):
        """Sorted, deduplicated machine tags."""
        tags = list({t[1] for t in self.tags_with_types})
        tags.sort(key=lambda t: (t.meta_business_unit is None, str(t).upper()))
        return tags
    def available_tags(self):
        # tags w/o mbu or w mbu where this machine is and that this machine does not have yet
        tags = set([])
        for meta_business_unit in self.meta_business_units:
            tags.update(Tag.objects.available_for_meta_business_unit(meta_business_unit))
        tags = list(tags.difference(self.tags))
        tags.sort(key=lambda t: (t.meta_business_unit is None, str(t).upper()))
        return tags
    def tag_names(self):
        # Optimized for only one SQL query
        query = (
            "select name from inventory_tag where id in ("
            "  select tag_id from inventory_machinetag where serial_number = %s"
            "  union"
            "  select tag_id from inventory_metabusinessunittag as mbut"
            "  join inventory_businessunit as bu on (bu.meta_business_unit_id = mbut.meta_business_unit_id)"
            "  join inventory_machinesnapshot as ms on (ms.business_unit_id = bu.id)"
            "  join inventory_currentmachinesnapshot as cms on (cms.machine_snapshot_id = ms.id)"
            "  where cms.serial_number = %s"
            ")"
        )
        cursor = connection.cursor()
        cursor.execute(query, [self.serial_number, self.serial_number])
        return sorted((t[0] for t in cursor.fetchall()), key=lambda n: n.lower())
    def update_taxonomy_tags(self, taxonomy, tag_names):
        """Synchronize the machine tags of one taxonomy with the given tag names."""
        tag_names = set(tag_names)
        with transaction.atomic():
            existing_machine_tags = (MachineTag.objects.select_for_update()
                                     .select_related("tag")
                                     .filter(serial_number=self.serial_number,
                                             tag__taxonomy=taxonomy))
            existing_tag_names = set(mt.tag.name for mt in existing_machine_tags)
            # delete old tags
            tag_names_to_delete = existing_tag_names - tag_names
            if tag_names_to_delete:
                existing_machine_tags.filter(tag__name__in=tag_names_to_delete).delete()
            # add missing tags
            for tag_name in tag_names - existing_tag_names:
                try:
                    # nested atomic block so a tag name collision does not
                    # abort the outer transaction
                    with transaction.atomic():
                        tag, _ = Tag.objects.get_or_create(taxonomy=taxonomy, name=tag_name)
                except IntegrityError:
                    logger.error("Tag collision, taxonomy '%s', name '%s'", taxonomy.pk, tag_name)
                else:
                    MachineTag.objects.get_or_create(serial_number=self.serial_number, tag=tag)
    def max_incident_severity(self):
        """Highest severity among the open machine incidents, or None."""
        return (MachineIncident.objects.select_related("incident")
                .filter(serial_number=self.serial_number, status__in=Status.open_values())
                .aggregate(max_incident_severity=Max("incident__severity"))
                )["max_incident_severity"]
    def open_incidents(self):
        return list(MachineIncident.objects.select_related("incident")
                    .filter(serial_number=self.serial_number, status__in=Status.open_values()))
    def compliance_check_statuses(self):
        return get_machine_compliance_check_statuses(self.serial_number, self.tags)
    def archive(self):
        """Remove the machine from the current inventory (history is kept)."""
        CurrentMachineSnapshot.objects.filter(serial_number=self.serial_number).delete()
    def has_recent_source_snapshot(self, source_module, max_age=3600):
        """True if a snapshot from this source module was seen in the last max_age seconds."""
        query = (
            "select count(*) from inventory_currentmachinesnapshot as cms "
            "join inventory_source as s on (cms.source_id = s.id) "
            "join inventory_machinesnapshotcommit as msc on (cms.machine_snapshot_id=msc.machine_snapshot_id) "
            "where cms.serial_number = %s and s.module = %s and msc.last_seen > %s"
        )
        args = [self.serial_number, source_module, timezone.now() - timedelta(seconds=max_age)]
        with connection.cursor() as cursor:
            cursor.execute(query, args)
            t = cursor.fetchone()
        return t[0] > 0
    # events related methods
    @cached_property
    def _raw_info_for_event(self):
        """Returns all the machine info necessary for the events in 1 DB query.

        This is used in get_probe_filtering_values and get_serialized_info_for_event. Both of them are used during
        the event enrichment pipeline step, thus the use of the single cached property as source.
        """
        query = (
            "with ms as ("
            "  select s.name as src, ms.id, ms.type, ms.platform,"
            "  ms.business_unit_id, ms.os_version_id, ms.system_info_id, ms.principal_user_id"
            "  from inventory_machinesnapshot as ms"
            "  join inventory_currentmachinesnapshot as cms on (cms.machine_snapshot_id = ms.id)"
            "  join inventory_source as s on (ms.source_id = s.id)"
            "  where cms.serial_number = %s"
            "), bu as ("
            "  select s.name as src, s.module, bu.id, bu.key, bu.meta_business_unit_id, bu.name, bu.reference"
            "  from inventory_businessunit as bu"
            "  join ms on (ms.business_unit_id = bu.id)"
            "  join inventory_source as s on (s.id = bu.source_id)"
            "), t as ("
            "  select mt.tag_id as id from inventory_machinetag as mt where mt.serial_number = %s"
            "  union"
            "  select mbut.tag_id as id from inventory_metabusinessunittag as mbut"
            "  join bu on (bu.meta_business_unit_id = mbut.meta_business_unit_id)"
            ") "
            # hostnames
            "select ms.src, 'system_info' as key,"
            "jsonb_build_object('computer_name', si.computer_name, 'hostname', si.hostname) "
            "from inventory_systeminfo as si "
            "join ms on ms.system_info_id = si.id "
            "where si.computer_name is not null or si.hostname is not null "
            "union "
            # business units not API only
            "select bu.src, 'business_unit' as key,"
            "jsonb_build_object('reference', bu.reference, 'name', bu.name, 'key', substring(bu.key, 0, 9)) "
            "from bu "
            "where bu.module <> 'zentral.contrib.inventory'"
            "union "
            # tags
            "select null, 'tags' as key,"
            "jsonb_build_object('id', t.id, 'name', it.name) "
            "from t join inventory_tag as it on (it.id = t.id) "
            "union "
            # os versions
            "select ms.src, 'os_version' as key,"
            "jsonb_build_object("
            "  'name', osv.name, 'major', osv.major, 'minor', osv.minor, 'patch', osv.patch, 'build', osv.build"
            ") "
            "from inventory_osversion as osv join ms on (ms.os_version_id = osv.id) "
            "union "
            # platforms
            "select null, 'platforms' as key,"
            "jsonb_agg(distinct ms.platform) from ms where ms.platform is not null "
            "union "
            # types
            "select null, 'types' as key,"
            "jsonb_agg(distinct ms.type) from ms where ms.type is not null "
            "union "
            # meta business units
            "select null, 'meta_business_units' as key,"
            "jsonb_build_object('id', mbu.id, 'name', mbu.name) "
            "from inventory_metabusinessunit as mbu "
            "join bu on (bu.meta_business_unit_id = mbu.id)"
        )
        if self._include_groups_in_serialized_info_for_event():
            query += (
                "union "
                # groups
                "select ms.src, 'groups' as key, "
                "jsonb_build_object('reference', g.reference, 'name', g.name, 'key', substring(g.key, 0, 9)) "
                "from inventory_machinegroup as g "
                "join inventory_machinesnapshot_groups as msg on (msg.machinegroup_id = g.id) "
                "join ms on (ms.id = msg.machinesnapshot_id) "
            )
        if self._include_principal_user_in_serialized_info_for_event():
            query += (
                "union "
                # principal user
                "select ms.src, 'principal_user' as key,"
                "jsonb_build_object('id', pu.id, 'unique_id', pu.unique_id, 'principal_name', pu.principal_name) "
                "from ms join inventory_principaluser as pu on (ms.principal_user_id = pu.id) "
            )
        cursor = connection.cursor()
        cursor.execute(query, [self.serial_number, self.serial_number])
        return cursor.fetchall()
    def get_probe_filtering_values(self):
        """Returns the values used by the probe inventory filters."""
        platform_fv = None
        type_fv = None
        mbu_ids = set([])
        tag_ids = set([])
        for src, key, agg in self._raw_info_for_event:
            if not agg:
                continue
            if key == "platforms":
                platform_fv = Counter(agg).most_common(1)[0][0]
            elif key == "types":
                type_fv = Counter(agg).most_common(1)[0][0]
            elif key == "meta_business_units":
                mbu_ids.add(agg["id"])
            elif key == "tags":
                tag_ids.add(agg["id"])
        return (platform_fv, type_fv, mbu_ids, tag_ids)
    @cached_property
    def cached_probe_filtering_values(self):
        """Cached version of get_probe_filtering_values"""
        cache_key = "mm-probe-fvs_{}".format(self.get_urlsafe_serial_number())
        filtering_values = cache.get(cache_key)
        if filtering_values is None:
            filtering_values = self.get_probe_filtering_values()
            cache.set(cache_key, filtering_values, 60)  # TODO: Hard coded timeout value
        return filtering_values
    def get_legacy_serialized_info_for_event(self):
        """Serialize the machine information to be included in the events.

        Legacy output. Triggers a lot of DB queries. Kept for reference.
        """
        machine_d = {}
        for ms in self.snapshots:
            source = ms.source
            ms_d = {'name': ms.get_machine_str()}
            if ms.business_unit:
                if not ms.business_unit.is_api_enrollment_business_unit():
                    ms_d['business_unit'] = {'reference': ms.business_unit.reference,
                                             'key': ms.business_unit.get_short_key(),
                                             'name': ms.business_unit.name}
            if ms.os_version:
                ms_d['os_version'] = str(ms.os_version)
            if self._include_groups_in_serialized_info_for_event():
                for group in ms.groups.all():
                    ms_d.setdefault('groups', []).append({'reference': group.reference,
                                                          'key': group.get_short_key(),
                                                          'name': group.name})
            if self._include_principal_user_in_serialized_info_for_event() and ms.principal_user:
                ms_d['principal_user'] = {'id': ms.principal_user.pk,
                                          'unique_id': ms.principal_user.unique_id,
                                          'principal_name': ms.principal_user.principal_name}
            key = slugify(source.name)
            machine_d[key] = ms_d
        for tag in self.tags:
            machine_d.setdefault('tags', []).append({'id': tag.id,
                                                     'name': tag.name})
        for meta_business_unit in self.meta_business_units:
            machine_d.setdefault('meta_business_units', []).append({
                'name': meta_business_unit.name,
                'id': meta_business_unit.id
            })
        if self.platform:
            machine_d['platform'] = self.platform
        if self.type:
            machine_d['type'] = self.type
        return machine_d
    def get_serialized_info_for_event(self):
        """Serialize the machine information to be included in the events.

        Legacy ouput. Only 1 DB query.
        """
        machine_d = {}
        for src, key, agg in self._raw_info_for_event:
            if not agg:
                continue
            if src:
                # source-scoped info goes under the slugified source name
                d = machine_d.setdefault(slugify(src), {})
            else:
                d = machine_d
            if key == "system_info":
                d["name"] = agg["computer_name"] or agg["hostname"]
            elif key == "os_version":
                # "<name> <major>.<minor>.<patch> (<build>)", skipping missing parts
                build = agg.get("build")
                os_version_items = (
                    agg.get("name"),
                    ".".join(str(v) for v in (agg.get(k) for k in ('major', 'minor', 'patch')) if v is not None),
                    f"({build})" if build else None
                )
                os_version_str = " ".join(s for s in os_version_items if s)
                if os_version_str:
                    d[key] = os_version_str
            elif key in ("types", "platforms"):
                # keep the most common value, under the singular key
                d[key[:-1]] = Counter(agg).most_common(1)[0][0]
            elif key == "principal_user":
                d[key] = agg
            else:
                d.setdefault(key, []).append(agg)
        return machine_d
    @cached_property
    def cached_serialized_info_for_event(self):
        """Cached version of get_serialized_info_for_event"""
        cache_key = "mm-si_{}".format(self.get_urlsafe_serial_number())
        serialized_info = cache.get(cache_key)
        if serialized_info is None:
            serialized_info = self.get_serialized_info_for_event()
            cache.set(cache_key, serialized_info, 60)  # TODO: Hard coded timeout value
        return serialized_info
class MACAddressBlockAssignmentOrganization(models.Model):
    """Organization to which a MAC address block is assigned."""
    name = models.TextField()
    address = models.TextField()

    class Meta:
        # the same organization name can appear with different addresses
        unique_together = (('name', 'address'),)
class MACAddressBlockAssignmentManager(models.Manager):
    """Manager used to import MAC address block assignments."""

    def import_assignment(self, registry, assignment, organization_name, organization_address):
        """Create or update a MAC address block assignment.

        Returns the (object, created) tuple from update_or_create.
        """
        organization, _ = MACAddressBlockAssignmentOrganization.objects.get_or_create(
            name=organization_name,
            address=organization_address)
        # use self instead of re-referencing the model's default manager:
        # idiomatic for a custom manager, and keeps working if the manager
        # is attached under another name
        return self.update_or_create(assignment=assignment,
                                     defaults={"registry": registry,
                                               "organization": organization})
class MACAddressBlockAssignment(models.Model):
    """MAC address block assignment from one of the MA-L/MA-M/MA-S registries."""
    # registry the assignment comes from
    registry = models.CharField(max_length=8,
                                choices=(('MA-L', 'MA-L'),
                                         ('MA-M', 'MA-M'),
                                         ('MA-S', 'MA-S')))
    # MAC address block prefix — length varies with the registry
    assignment = models.CharField(max_length=9, unique=True)
    organization = models.ForeignKey(MACAddressBlockAssignmentOrganization, on_delete=models.CASCADE)

    objects = MACAddressBlockAssignmentManager()

    def __str__(self):
        return " ".join((self.registry, self.assignment))
# Enrollment
class EnrollmentSecretManager(models.Manager):
    def verify(self, model, secret,
               user_agent, public_ip_address,
               serial_number=None, udid=None,
               meta_business_unit=None,
               **kwargs):
        """Verify an enrollment secret and record the request.

        Looks up the secret restricted to the given enrollment model,
        locks the row (select_for_update — must run inside a transaction),
        and on success creates and returns an EnrollmentSecretRequest while
        incrementing the secret's request count.

        Raises EnrollmentSecretVerificationFailed if the secret is unknown
        or invalid for the given machine / business unit.
        """
        kwargs.update({"{}__isnull".format(model): False,
                       "secret": secret})
        qs = self.filter(**kwargs).select_related(model).select_for_update()
        # fetch the row directly: the original count()-then-index pattern
        # issued an extra COUNT query before the SELECT
        es = qs.first()
        if es is None:
            raise EnrollmentSecretVerificationFailed("unknown secret")
        is_valid, err_msg = es.is_valid(serial_number, udid, meta_business_unit)
        if not is_valid:
            raise EnrollmentSecretVerificationFailed(err_msg, es)
        esr = EnrollmentSecretRequest.objects.create(enrollment_secret=es,
                                                     user_agent=user_agent,
                                                     public_ip_address=public_ip_address,
                                                     serial_number=serial_number,
                                                     udid=udid)
        es.request_count += 1
        es.save()
        return esr
class EnrollmentSecret(models.Model):
    """Secret used to authenticate machine enrollment requests."""
    secret = models.CharField(max_length=256, unique=True, editable=False)
    meta_business_unit = models.ForeignKey(
        MetaBusinessUnit, on_delete=models.PROTECT,
        help_text="The business unit the machine will be assigned to at enrollment",
    )
    tags = models.ManyToManyField(
        Tag, blank=True,
        help_text="The tags that the machine will get at enrollment"
    )
    # optional allow-lists: when set, only these serial numbers / UDIDs
    # can use the secret
    serial_numbers = ArrayField(models.TextField(), blank=True, null=True)
    udids = ArrayField(models.TextField(), blank=True, null=True)
    # optional maximum number of successful requests
    quota = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(1),
                                                                   MaxValueValidator(200000)])
    request_count = models.IntegerField(default=0, validators=[MinValueValidator(0)], editable=False)
    revoked_at = models.DateTimeField(null=True, blank=True)
    expired_at = models.DateTimeField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)

    objects = EnrollmentSecretManager()

    @property
    def is_revoked(self):
        return self.revoked_at is not None

    @property
    def is_expired(self):
        return bool(self.expired_at and self.expired_at <= timezone.now())

    @property
    def is_used_up(self):
        return bool(self.quota and self.request_count >= self.quota)

    def is_valid(self, serial_number=None, udid=None, meta_business_unit=None):
        """Return an (is_valid, err_msg) tuple for the given enrollment context.

        err_msg is None when the secret is valid.
        """
        err_msg = None
        if self.is_revoked:
            err_msg = "revoked"
        elif self.is_expired:
            err_msg = "expired"
        elif serial_number and self.serial_numbers and serial_number not in self.serial_numbers:
            err_msg = "serial number mismatch"
        elif udid and self.udids and udid not in self.udids:
            err_msg = "udid mismatch"
        elif meta_business_unit and meta_business_unit != self.meta_business_unit:
            err_msg = "business unit mismatch"
        elif self.is_used_up:
            err_msg = "quota used up"
        if err_msg:
            return False, err_msg
        else:
            return True, None

    def save(self, *args, **kwargs):
        # generate the random secret on first save only
        if not self.pk:
            self.secret = get_random_string(kwargs.pop("secret_length", 64))
        super().save(*args, **kwargs)

    def serialize_for_event(self):
        """Serialize the secret metadata (never the secret value) for events."""
        d = {}
        for attr in ("pk",
                     "quota", "request_count", "is_used_up",
                     "revoked_at", "is_revoked",
                     "expired_at", "is_expired",
                     "created_at"):
            val = getattr(self, attr)
            if val is not None:
                d[attr] = val
        tags = [{"pk": t.pk, "name": t.name} for t in self.tags.all()]
        if tags:
            d["tags"] = tags
        if self.meta_business_unit:
            d["meta_business_unit"] = self.meta_business_unit.serialize()
        if self.serial_numbers:
            d["serial_numbers"] = self.serial_numbers
        if self.udids:
            d["udids"] = self.udids
        return {"enrollment_secret": d}

    def get_api_enrollment_business_unit(self):
        """Return the first API enrollment business unit, or None."""
        try:
            return self.meta_business_unit.api_enrollment_business_units()[0]
        except (AttributeError, IndexError):
            pass

    def urlsafe_serial_numbers(self):
        """Yield (serial number, urlsafe serial number) pairs.

        serial_numbers is a nullable field: guard against None to avoid a
        TypeError when no allow-list is set.
        """
        for serial_number in self.serial_numbers or []:
            yield serial_number, MetaMachine(serial_number).get_urlsafe_serial_number()
class EnrollmentSecretRequest(models.Model):
    """A single verified use of an enrollment secret (audit trail)."""
    enrollment_secret = models.ForeignKey(EnrollmentSecret, on_delete=models.CASCADE)
    user_agent = models.TextField()
    public_ip_address = models.GenericIPAddressField()
    serial_number = models.TextField(null=True, blank=True)
    udid = models.TextField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
class BaseEnrollment(models.Model):
    """Abstract base model for the concrete enrollment models.

    Couples an enrollment secret with an optional distributor (generic FK)
    that is notified when the enrollment is updated.
    """
    secret = models.OneToOneField(EnrollmentSecret,
                                  on_delete=models.CASCADE,
                                  related_name="%(app_label)s_%(class)s", editable=False)
    # bumped on every save, to invalidate distributed artifacts
    version = models.PositiveSmallIntegerField(default=1, editable=False)
    distributor_content_type = models.ForeignKey(ContentType, on_delete=models.PROTECT,
                                                 related_name="+",
                                                 null=True, editable=False)
    distributor_pk = models.PositiveIntegerField(null=True, editable=False)
    distributor = GenericForeignKey("distributor_content_type", "distributor_pk")
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)

    def get_description_for_distributor(self):
        return str(self)

    class Meta:
        abstract = True
        unique_together = (("distributor_content_type", "distributor_pk"),)

    def can_be_deleted(self):
        # an enrollment managed by a distributor cannot be deleted directly
        return not self.distributor

    def can_be_revoked(self):
        return not self.secret.is_revoked

    def save(self, *args, **kwargs):
        if self.pk:
            # atomic, race-free increment done in the database
            self.version = F("version") + 1
        super().save(*args, **kwargs)
        if self.distributor:
            self.distributor.enrollment_update_callback()
        # resolve the F() expression so self.version holds the real value
        self.refresh_from_db()

    def delete(self, *args, **kwargs):
        if self.can_be_deleted():
            # deleting the secret cascades to this enrollment
            self.secret.delete()
            super().delete(*args, **kwargs)
        else:
            raise ValueError("Enrollment {} cannot be deleted".format(self.pk))

    def serialize_for_event(self):
        """Serialize the enrollment (with its secret metadata) for events."""
        enrollment_dict = {"pk": self.pk,
                           "created_at": self.created_at}
        enrollment_dict.update(self.secret.serialize_for_event())
        distributor = self.distributor
        if distributor and hasattr(distributor, "serialize_for_event"):
            enrollment_dict.update(distributor.serialize_for_event())
        return enrollment_dict
# Observed files
class FileManager(MTObjectManager):
    def search(self, **kwargs):
        """Search files by name, bundle id or bundle name.

        NOTE(review): returns a queryset on a hit but a plain list when no
        name is given — callers must tolerate both types.
        """
        qs = self.all()
        name = kwargs.get("name")
        if name:
            qs = qs.filter(Q(name__icontains=name)
                           | Q(bundle__bundle_id__icontains=name)
                           | Q(bundle__bundle_name__icontains=name))
            return qs.select_related("bundle").order_by("bundle__bundle_name", "name")
        else:
            return []

    def search_certificates(self, **kwargs):
        """Search certificates used to sign files, including the signing chain.

        Case-insensitive substring match on common name, organization and
        organizational unit. The user input is escaped for LIKE and passed
        as a bound parameter (no SQL injection).
        """
        q = kwargs.get("query")
        if not q:
            return []
        else:
            # recursive CTE: seed with the certificates that signed at least
            # one file, then walk up the signing chain via signed_by_id
            query = (
                "WITH RECURSIVE certificates AS ("
                "SELECT c1.id, c1.signed_by_id "
                "FROM inventory_certificate AS c1 "
                "JOIN inventory_file invf ON (invf.signed_by_id = c1.id) "

                "UNION "

                "SELECT c2.id, c2.signed_by_id "
                "FROM inventory_certificate AS c2 "
                "JOIN certificates c ON (c.signed_by_id = c2.id)"

                ") SELECT * FROM inventory_certificate c3 "
                "JOIN certificates AS c ON (c.id = c3.id) "
                "WHERE UPPER(c3.common_name) LIKE UPPER(%s) "
                "OR UPPER(c3.organization) LIKE UPPER(%s) "
                "OR UPPER(c3.organizational_unit) LIKE UPPER(%s) "
                "ORDER BY c3.common_name, c3.organization, c3.organizational_unit;"
            )
            q = "%{}%".format(connection.ops.prep_for_like_query(q))
            return Certificate.objects.raw(query, [q, q, q])
class File(AbstractMTObject):
    """File observed in the inventory of a machine."""
    source = models.ForeignKey(Source, on_delete=models.PROTECT)
    name = models.TextField()
    path = models.TextField()
    sha_256 = models.CharField(max_length=64, db_index=True)
    # app bundle the file belongs to, if any
    bundle = models.ForeignKey(OSXApp, on_delete=models.PROTECT, blank=True, null=True)
    bundle_path = models.TextField(blank=True, null=True)
    signed_by = models.ForeignKey(Certificate, on_delete=models.PROTECT, blank=True, null=True)

    objects = FileManager()
# compliance checks
class JMESPathCheck(models.Model):
    """Inventory compliance check based on a JMESPath expression."""
    compliance_check = models.OneToOneField(
        "compliance_checks.ComplianceCheck",
        on_delete=models.CASCADE,
        related_name="jmespath_check",
        editable=False,
    )
    # name of the inventory source whose data the expression is evaluated on
    source_name = models.TextField()
    platforms = ArrayField(
        models.CharField(max_length=32, choices=PLATFORM_CHOICES),
        blank=True,
        default=list,
        help_text="Restrict this check to some platforms"
    )
    # if set, restrict the check to the machines carrying these tags
    tags = models.ManyToManyField(Tag, blank=True)
    jmespath_expression = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def get_absolute_url(self):
        return reverse("inventory:compliance_check", args=(self.pk,))

    def get_platforms_display(self):
        # NOTE(review): PLATFORM_CHOICES_DICT.get(p) yields None for an unknown
        # platform, which would make sorted() raise a TypeError — presumably
        # the field choices make this impossible; confirm.
        return ", ".join(sorted(PLATFORM_CHOICES_DICT.get(p) for p in self.platforms))

    def serialize_for_event(self):
        """Serialize the check for inclusion in events."""
        return {
            "pk": self.pk,
            "source_name": self.source_name,
            "tags": sorted(str(tag) for tag in self.tags.select_related("taxonomy", "meta_business_unit").all()),
            "jmespath_expression": self.jmespath_expression,
        }
|
zentralopensource/zentral
|
zentral/contrib/inventory/models.py
|
Python
|
apache-2.0
| 67,736
|
# -*- coding: utf-8 -*-
#
# Release Notes build configuration file, created by
# sphinx-quickstart on Thu Feb 12 02:10:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#
# NOTE(review): this file uses Python-2-only syntax (ur'' string prefixes
# below) and therefore cannot run under Python 3 as-is.

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
import os
import sys
sys.path.append(os.path.abspath('_ext'))

# Set this up to parse Django-driven code.
# NOTE(review): joining ".." onto __file__ itself (not its dirname) is
# unusual but works because os.path traversal normalizes it at import time.
sys.path.insert(0, os.path.join(__file__, "..", ".."))
sys.path.insert(0, os.path.dirname(__file__))

import reviewboard


# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'extralinks']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.txt'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Release Notes'
copyright = u'2009-2011, Christian Hammond'

# Used by the extralinks extension to link :bug:`NNN` references.
bugtracker_url = 'http://www.reviewboard.org/bugs/%s'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join([str(i) for i in reviewboard.__version_info__[:2]])
# The full version, including alpha/beta/rc tags.
release = reviewboard.get_version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# Options for HTML output
# -----------------------

html_theme = 'default'

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "Release Notes"

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'ReleaseNotes'


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# NOTE(review): ur'' literals are Python 2 only.
latex_documents = [
    ('contents', 'ReleaseNotes.tex', ur'Release Notes',
     ur'Christian Hammond', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://www.reviewboard.org/docs/manual/dev': None}
|
Khan/reviewboard
|
docs/releasenotes/conf.py
|
Python
|
mit
| 6,554
|
import math
import re
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_similarity.retrieval_metrics import BNDCG
@pytest.fixture
def test_data():
    """Fixture: 4 queries (labels 1, 1, 1, 0), each with 3 lookups at
    distances 0.0 / 0.1 / 0.2, and the mask of label-matching lookups."""
    return {
        "query_labels": tf.constant([1, 1, 1, 0]),
        "lookup_distances": tf.constant(
            [
                [0.0, 0.1, 0.2],
                [0.0, 0.1, 0.2],
                [0.0, 0.1, 0.2],
                [0.0, 0.1, 0.2],
            ],
            dtype=float,
        ),
        "match_mask": tf.constant(
            [
                [True, True, False],
                [True, True, False],
                [True, True, False],
                [False, False, True],
            ],
            dtype=bool,
        ),
    }
def test_compute_distance_threshold(test_data):
    """Test filtering using distance threshold."""
    metric = BNDCG(k=3, distance_threshold=0.1)
    result = metric.compute(
        query_labels=test_data["query_labels"],
        lookup_distances=test_data["lookup_distances"],
        match_mask=test_data["match_mask"],
    )
    # lookups beyond 0.1 are dropped; 3 of 4 queries keep only matches
    np.testing.assert_allclose(result, tf.constant(0.75))
def test_compute_at_k(test_data):
    """Test filtering using K."""
    metric = BNDCG(k=2)
    # when no distance threshold is given, it defaults to +inf
    assert metric.distance_threshold == math.inf
    result = metric.compute(
        query_labels=test_data["query_labels"],
        lookup_distances=test_data["lookup_distances"],
        match_mask=test_data["match_mask"],
    )
    np.testing.assert_allclose(result, tf.constant(0.75))
def test_compute_macro(test_data):
    """Test macro averaging over the classes (per-class mean of the metric)."""
    rm = BNDCG(k=2, average="macro")
    # when no distance threshold is given, it defaults to +inf
    assert rm.distance_threshold == math.inf
    bndcg = rm.compute(
        query_labels=test_data["query_labels"],
        lookup_distances=test_data["lookup_distances"],
        match_mask=test_data["match_mask"],
    )
    expected = tf.constant(0.5)
    np.testing.assert_allclose(bndcg, expected)
def test_query_and_lookup_distances_different_dims():
    """compute() must raise a ValueError when the number of lookup distance
    rows (2) does not match the number of query labels (4)."""
    query_labels = tf.constant([1, 2, 3, 4])
    lookup_distances = tf.constant(
        [
            [0.0, 0.1, 0.2],
            [0.0, 0.1, 0.2],
        ],
        dtype=float,
    )
    match_mask = tf.constant(
        [
            [True, True, False],
            [True, True, False],
            [True, True, False],
            [False, False, True],
        ],
        dtype=bool,
    )
    rm = BNDCG(k=3)
    msg = (
        "The number of lookup distance rows must equal the number of query "
        "labels. Number of lookup distance rows is 2 but the number of "
        "query labels is 4."
    )
    # re.escape: the expected message contains regex metacharacters ('.')
    with pytest.raises(ValueError, match=re.escape(msg)):
        _ = rm.compute(
            query_labels=query_labels,
            lookup_distances=lookup_distances,
            match_mask=match_mask,
        )
|
tensorflow/similarity
|
tests/retrieval_metrics/test_bndcg.py
|
Python
|
apache-2.0
| 2,836
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
    """Expose the shipment advice outgoing backorder policy in the settings."""
    _inherit = "res.config.settings"

    # related company field, made writable from the settings form
    shipment_advice_outgoing_backorder_policy = fields.Selection(
        related="company_id.shipment_advice_outgoing_backorder_policy", readonly=False
    )
|
OCA/stock-logistics-transport
|
shipment_advice/models/res_config_settings.py
|
Python
|
agpl-3.0
| 374
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.