from math import *
from cmath import rect as from_polar
from collections import Sequence  # used by irfft_osc_gen below
from itertools import *
from frostsynth import *
from frostsynth.fft import *
from frostsynth.polytable import *
four_pi = 4 * pi
def sineping_gen(amplitude, frequency, decay, phi=0, srate=None):
"""Generates amplitude * sin(2 * pi * t * frequency + phi) * exp(-t * decay)."""
srate = get_srate(srate)
dt = 1 / srate
d = exp(-dt * decay)
w = two_pi * dt * frequency
a1 = 2 * d * cos(w)
a2 = d * d
y1 = amplitude * sin(phi)
yield y1
y0 = amplitude * sin(phi + w) * d
yield y0
while True:
y2 = y0
y0 = a1 * y0 - a2 * y1
yield y0
y1 = y0
y0 = a1 * y0 - a2 * y2
yield y0
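# Usage sketch (illustrative; assumes a default sample rate has been set
# elsewhere via frostsynth's srate machinery):
# ping = sineping_gen(amplitude=1.0, frequency=440.0, decay=3.0)
# samples = [next(ping) for _ in range(1024)]
# The recurrence y[n] = a1 * y[n-1] - a2 * y[n-2] is a two-pole resonator
# whose output, given the two seed samples, is exactly the decaying
# sinusoid described in the docstring.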
def sinepings_gen(amplitudes, frequencies, decays, phis=None, srate=None):
if phis is None:
phis = repeat(0)
sps = [sineping_gen(*params) for params in zip(amplitudes, frequencies, decays, phis, repeat(srate))]
return mix_gen(sps)
def sinepings(amplitudes, frequencies, decays, phis=None, accuracy_goal=1e-4, srate=None):
duration_constant = -log(accuracy_goal)
if phis is None:
phis = repeat(0)
sps = [timeslice_gen(sineping_gen(*params), duration_constant / params[2], srate=srate) for params in zip(amplitudes, frequencies, decays, phis, repeat(srate))]
return mix_longest(sps)
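# Note: timeslice_gen truncates each ping once its envelope exp(-t * decay)
# falls below accuracy_goal, i.e. after t = -log(accuracy_goal) / decay
# seconds, which is what duration_constant / params[2] computes above.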
def bl_sinepings_gen(amplitudes, frequencies, decays, phis=None, srate=None):
if srate is None:
srate = get_srate()
nyquist = 0.5 * srate
if phis is None:
phis = repeat(0)
new_params = []
for a, f, d, t in zip(amplitudes, frequencies, decays, phis):
if f < nyquist:
new_params.append((a, f, d, t, srate))
sps = [sineping_gen(*params) for params in new_params]
return mix_gen(sps)
def bl_gen(coef_fun, freq_gen, srate=None, fade_bandwidth=2000, max_partials=4000):
if srate is None:
srate = get_srate()
nyquist = 0.5 * srate
fade_start = nyquist - fade_bandwidth
i_fade_bandwidth = 1.0 / fade_bandwidth
dt = 1.0 / srate
t = 0.0
phis = [0.0] * max_partials
dphi = two_pi * dt
for f in freq_gen:
s = 0.0
for k in range(1, max_partials + 1):
amplitude, multiplier = coef_fun(k, f, t)
partial_freq = f * multiplier
if partial_freq > nyquist:
break
elif partial_freq > fade_start:
amplitude *= 1.0 - (partial_freq - fade_start) * i_fade_bandwidth
s += sin(phis[k - 1]) * amplitude
phis[k - 1] += dphi * partial_freq
yield s
t += dt
def saw_coefs(k, f, t):
return (-0.4 / k, k)
def square_coefs(k, f, t):
k = k + k - 1
return (-0.6 / k, k)
def log_drum_coefs(k, f, t):
return (0.2 * exp(-t * k) / k ** 1.5, log(k) + 1)
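# Usage sketch (illustrative): a band-limited sawtooth at a constant 110 Hz.
# freq_gen may be any iterable of per-sample frequencies, so vibrato and
# glides work the same way.
# saw = bl_gen(saw_coefs, repeat(110.0))
# samples = list(islice(saw, 44100))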
def irfft_waveform(window):
# d/dp of sum(b * exp(two_pi_j * i * p)) pulls down a factor of two_pi_j * i per bin.
derivatives_window = [two_pi_j * b * i for i, b in enumerate(window)]
window = rpad(window + [0.0] * len(window))
derivatives_window = rpad(derivatives_window + [0.0] * len(derivatives_window))
values = unnormalized_irfft(window)
derivatives = irfft(derivatives_window)
ct = CubicTable(zip(values, derivatives), periodic=True)
l = len(ct)
return lambda p: ct(p * l)
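# Usage sketch (illustrative; rpad, unnormalized_irfft and CubicTable come
# from the frostsynth imports above):
# wf = irfft_waveform([0.0, 1.0, 0.5])  # bins: DC, fundamental, 2nd harmonic
# wf(0.25)  # evaluate the band-limited waveform at phase 0.25 of one period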
def irfft_waveform2(windows, periodic_y=False):
data = []
for window in windows:
derivatives_window = [two_pi_j * b * i for i, b in enumerate(window)]
window = rpad(window + [0.0] * len(window))
derivatives_window = rpad(derivatives_window + [0.0] * len(derivatives_window))
values = unnormalized_irfft(window)
derivatives = irfft(derivatives_window)
data.append(zip(values, derivatives))
xct = XCubicTable2D(data, periodic_x=True, periodic_y=periodic_y)
lx = xct.len_x
ly = xct.len_y
if periodic_y:
return lambda p, s: xct(p * lx, s * ly)
else:
ly -= 2
return lambda p, s: xct(p * lx, s * ly)
# TODO: Fade bandwidth.
def irfft_osc_gen(frequency, windows, dt=0.1, srate=None):
srate = get_srate(srate)
nyquist = 0.5 * srate
dt_ = 1.0 / srate
i_dt = 1.0 / dt
frequency = iter(frequency)
if isinstance(windows, Sequence):
if not isinstance(windows[0], Sequence):
windows = repeat(windows)
windows = iter(windows)
f = next(frequency)
window1 = [b for i, b in enumerate(next(windows)) if i * f < nyquist]
wf1 = irfft_waveform(window1)
yield wf1(0)
t = dt + dt_
phase = dt_ * f
for f in frequency:
if t > dt:
wf0 = wf1
window1 = [b for i, b in enumerate(next(windows)) if i * f < nyquist]
wf1 = irfft_waveform(window1)
t -= dt
w0 = wf0(phase)
yield w0 + t * i_dt * (wf1(phase) - w0)
phase += dt_ * f
t += dt_
def cos_sum(phase, coefs):
"""Evaluates sum(coef * cos(2 * pi * k * phase) for k, coef in enumerate(coefs, 0))."""
if not coefs:
return 0
elif len(coefs) == 1:
return coefs[0]
elif len(coefs) == 2:
return coefs[0] + coefs[1] * cos(two_pi * phase)
else:
c = cos(two_pi * phase)
c2 = c + c
bk = coefs[-1]
bk1 = coefs[-2] + c2 * bk
for coef in coefs[-3:0:-1]:
bk1, bk = coef + c2 * bk1 - bk, bk1
return coefs[0] + c * bk1 - bk
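# Sanity sketch for the Clenshaw branch above (illustrative):
# phase, coefs = 0.13, [0.5, 0.3, 0.2, 0.1]
# direct = sum(c * cos(two_pi * k * phase) for k, c in enumerate(coefs))
# assert abs(cos_sum(phase, coefs) - direct) < 1e-12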
def cos_octave_sum(phase, coefs):
"""Evaluates sum(coef * cos(2 * pi * 2 ** k * phase) for k, coef in enumerate(coefs, 0))."""
if not coefs:
return 0
elif len(coefs) == 1:
return coefs[0] * cos(two_pi * phase)
else:
c = cos(two_pi * phase)
result = coefs[0] * c
for coef in coefs[1:]:
c = 2 * c * c - 1
result += coef * c
return result
def sin_sum(phase, coefs):
"""Evaluates sum(coef * sin(2 * pi * k * phase) for k, coef in enumerate(coefs, 1))."""
if not coefs:
return 0
elif len(coefs) == 1:
return coefs[0] * sin(two_pi * phase)
elif len(coefs) == 2:
return coefs[0] * sin(two_pi * phase) + coefs[1] * sin(four_pi * phase)
else:
c2 = 2 * cos(two_pi * phase)
bk = coefs[-1]
bk1 = coefs[-2] + c2 * bk
for coef in coefs[-3:0:-1]:
bk1, bk = coef + c2 * bk1 - bk, bk1
return sin(two_pi * phase) * (coefs[0] + c2 * bk1 - bk)
def sin_odd_sum(phase, coefs):
"""Evaluates sum(coef * sin(2 * pi * (2 * k - 1) * phase) for k, coef in enumerate(coefs, 1))."""
if not coefs:
return 0
elif len(coefs) == 1:
return coefs[0] * sin(two_pi * phase)
else:
s = sin(two_pi * phase)
s2 = 2 - 4 * s * s
bk = coefs[-1]
bk1 = coefs[-2] + s2 * bk
for coef in coefs[-3::-1]:
bk1, bk = coef + s2 * bk1 - bk, bk1
return s * (bk1 + bk)
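# Note: s2 = 2 - 4 * sin(x)**2 equals 2 * cos(2 * x), and
# sin((2k + 1) * x) = 2 * cos(2 * x) * sin((2k - 1) * x) - sin((2k - 3) * x),
# so the loop above is Clenshaw's recurrence over the odd harmonics.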
def sin_octave_sum(phase, coefs):
"""Evaluates sum(coef * sin(2 * pi * 2 ** k * phase) for k, coef in enumerate(coefs, 0))."""
if not coefs:
return 0
elif len(coefs) == 1:
return coefs[0] * sin(two_pi * phase)
else:
c = cos(two_pi * phase)
s = sin(two_pi * phase)
result = coefs[0] * s
s *= c + c
result += coefs[1] * s
for coef in coefs[2:]:
c = 2 * c * c - 1
s *= c + c
result += coef * s
return result
def sin_tritave_sum(phase, coefs):
"""Evaluates sum(coef * sin(2 * pi * 3 ** k * phase) for k, coef in enumerate(coefs, 0))."""
if not coefs:
return 0
elif len(coefs) == 1:
return coefs[0] * sin(two_pi * phase)
else:
s = sin(two_pi * phase)
result = coefs[0] * s
for coef in coefs[1:]:
s = s * (3 - 4 * s * s)
result += coef * s
return result
def cis_sum(phase, coefs, sharpness=1):
"""Evaluates sum(coef * exp(2j * pi * k * phase) for k, coef in enumerate(coefs, 0))."""
if not coefs:
return 0j
else:
z = from_polar(sharpness, two_pi * phase)
result = coefs[-1]
for coef in coefs[-2::-1]:
result = coef + z * result
return result
def cheb_sum(x, coefs):
"""Evaluates sum(coef * T(k, x) for k, coef in enumerate(coefs, 0)) where T(k, x) is the k:th Chebyshev polynomial."""
if not coefs:
return 0
else:
# Actually faster than cos_sum because pypy hates slices.
x2 = x + x
bk = 0
bk1 = 0
for coef in reversed(coefs):
bk1, bk = coef + x2 * bk1 - bk, bk1
return bk1 - x * bk
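# Identity check (illustrative): since T(k, cos(w)) == cos(k * w),
# cheb_sum(cos(two_pi * p), coefs) agrees with cos_sum(p, coefs), which is
# what the comment above is comparing against.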
def s_odd_sum(x, coefs):
"""Like sin_odd_sum, but takes s = sin(2 * pi * phase) directly as x."""
if not coefs:
return 0
else:
s2 = 2 - 4 * x * x
bk = 0
bk1 = 0
for coef in reversed(coefs):
bk1, bk = coef + s2 * bk1 - bk, bk1
return x * (bk1 + bk)
def triangle_octave_sum(x, coefs):
if not coefs:
return 0
else:
t = (x + x) % 2 - 1
result = 0
for coef in coefs:
t = abs(t)
t = t + t - 1
result += coef * t
return result
"""
Base view
"""
import uuid
import base64
from ..views import DEFAULT_LIBRARY_NAME_PREFIX, DEFAULT_LIBRARY_DESCRIPTION, \
USER_ID_KEYWORD
from flask import request, current_app
from flask.ext.restful import Resource
from ..models import db, User, Library, Permissions
from ..client import client
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from ..biblib_exceptions import BackendIntegrityError, PermissionDeniedError
class BaseView(Resource):
"""
A base view class to keep a single version of common functions used between
all of the views.
"""
@staticmethod
def helper_uuid_to_slug(library_uuid):
"""
Convert a UUID to a slug
See a discussion about the details here:
http://stackoverflow.com/questions/12270852/
convert-uuid-32-character-hex-string-into-a-
youtube-style-short-id-and-back
:param library_uuid: unique identifier for the library
:return: library_slug: base64 URL safe slug
"""
library_slug = base64.urlsafe_b64encode(library_uuid.bytes)
library_slug = library_slug.rstrip('=\n').replace('/', '_')
current_app.logger.info('Converted uuid: {0} to slug: {1}'
.format(library_uuid, library_slug))
return library_slug
@staticmethod
def helper_slug_to_uuid(library_slug):
"""
Convert a slug to a UUID
See a discussion about the details here:
http://stackoverflow.com/questions/12270852/
convert-uuid-32-character-hex-string-into-a-
youtube-style-short-id-and-back
Keep in mind that base64 only works on bytes, and so they have to be
encoded in ASCII. Flask uses unicode, and so you must modify the
encoding before passing it to base64. This is fine, given we output
all our encoded URLs for libraries as strings encoded in ASCII and do
not accept any unicode characters.
:param library_slug: base64 URL safe slug
:return: library_uuid: unique identifier for the library
"""
library_uuid = (library_slug + '==').replace('_', '/')
library_uuid = library_uuid.encode('ascii')
library_uuid = uuid.UUID(bytes=base64.urlsafe_b64decode(library_uuid))
current_app.logger.info('Converted slug: {0} to uuid: {1}'
.format(library_slug, library_uuid))
return str(library_uuid)
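# Round-trip sketch (illustrative; both helpers log via current_app, so this
# must run inside a Flask application context):
# >>> lid = uuid.uuid4()
# >>> slug = BaseView.helper_uuid_to_slug(lid)
# >>> BaseView.helper_slug_to_uuid(slug) == str(lid)
# True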
@staticmethod
def helper_get_user_id():
"""
Helper function: get the user id from the header, otherwise raise
a key error exception
:return: unique API user ID
"""
try:
user = request.headers[USER_ID_KEYWORD]
if user.isdigit():
user = int(user)
return user
except KeyError:
current_app.logger.error('No username passed')
raise
@staticmethod
def helper_create_user(absolute_uid):
"""
Creates a user in the database with a UID from the API
:param absolute_uid: UID from the API
:return: SQLAlchemy User instance
"""
try:
user = User(absolute_uid=absolute_uid)
db.session.add(user)
db.session.commit()
current_app.logger.info('Successfully created user: {0} [API] as '
'{1} [Microservice]'
.format(absolute_uid, user.id))
return user
except IntegrityError as error:
current_app.logger.error('IntegrityError. User: {0:d} was not '
'added. Full traceback: {1}'
.format(absolute_uid, error))
raise
@staticmethod
def helper_user_exists(absolute_uid):
"""
Checks if a user exists before attempting to create one
:param absolute_uid: UID from the API
:return: boolean for if the user exists
"""
user_count = User.query.filter(User.absolute_uid == absolute_uid).all()
user_count = len(user_count)
if user_count == 1:
current_app.logger.info('User exists in database: {0} [API]'
.format(absolute_uid))
return True
elif user_count == 0:
current_app.logger.warning('User does not exist in database: {0} '
'[API]'.format(absolute_uid))
return False
@staticmethod
def helper_absolute_uid_to_service_uid(absolute_uid):
"""
Convert the API UID to the BibLib service ID.
If the user does not exist in the database, first create a user.
:param absolute_uid: API UID
:return: BibLib service ID
"""
if not BaseView.helper_user_exists(absolute_uid=absolute_uid):
user = BaseView.helper_create_user(absolute_uid=absolute_uid)
else:
user = User.query.filter(User.absolute_uid == absolute_uid).one()
current_app.logger.info('User found: {0} -> {1}'
.format(absolute_uid, user.id))
return user.id
@staticmethod
def helper_email_to_api_uid(permission_data):
"""
A proxy to the user/e-mail resolver service. Passes on any errors from
the API.
:param permission_data: dictionary that should contain an e-mail key
:return: int of the user id
"""
try:
service = '{api}/{email}'.format(
api=current_app.config['BIBLIB_USER_EMAIL_ADSWS_API_URL'],
email=permission_data['email']
)
current_app.logger.info('Obtaining UID of user: {0}'
.format(permission_data['email']))
response = client().get(
service
)
except KeyError as error:
current_app.logger.error('No user email provided. [{0}]'
.format(error))
raise
if response.status_code == 200:
return int(response.json()['uid'])
elif response.status_code == 404:
raise NoResultFound('API does not have this user')
else:
raise Exception('Unknown internal error')
@staticmethod
def helper_access_allowed(service_uid, library_id, access_type):
"""
Determines if the given user has permissions to look at the content
of a library.
:param service_uid: the user ID within this microservice
:param library_id: the unique ID of the library
:param access_type: name of the access type to check, e.g. 'read'
:return: boolean, access (True), no access (False)
"""
try:
permissions = Permissions.query.filter(
Permissions.library_id == library_id,
Permissions.user_id == service_uid
).one()
return getattr(permissions, access_type)
except NoResultFound as error:
current_app.logger.error('No permissions for '
'user: {0}, library: {1}, permission: {2}'
' [{3}]'.format(service_uid, library_id,
access_type, error))
return False
@staticmethod
def helper_library_exists(library_id):
"""
Helper function that checks if a library exists in the database or not
by catching the raise and returning a True/False statement.
:param library_id: the unique ID of the library
:return: bool for exists (True) or does not (False)
"""
try:
Library.query.filter(Library.id == library_id).one()
return True
except NoResultFound:
return False
@staticmethod
def helper_validate_library_data(service_uid, library_data):
"""
Validates the library data to ensure the user does not give empty
content for the title and description.
:param service_uid: the user ID within this microservice
:param library_data: content needed to create a library
:return: validated name and description
"""
_name = library_data.get('name') or DEFAULT_LIBRARY_NAME_PREFIX
_description = library_data.get('description') or \
DEFAULT_LIBRARY_DESCRIPTION
current_app.logger.info('Creating library for user_service: {0:d}, '
'with properties: {1}'
.format(service_uid, library_data))
# We want to ensure that the users have unique library names. However,
# it should be possible that they have access to other libraries from
# other people, that have the same name
library_names = \
[i.library.name for i in
Permissions.query.filter(Permissions.user_id == service_uid,
Permissions.owner == True).all()]
matches = [name for name in library_names if name == _name]
if matches:
current_app.logger.error('Name supplied for the library already '
'exists: "{0}" ["{1}"]'.format(_name,
matches))
raise BackendIntegrityError('Library name already exists.')
if _name == DEFAULT_LIBRARY_NAME_PREFIX:
default_names = [lib_name for lib_name
in library_names
if DEFAULT_LIBRARY_NAME_PREFIX
in lib_name]
_extension = len(default_names) + 1
_name = '{0} {1}'.format(_name,
_extension)
library_out = {}
for key in library_data:
library_out[key] = library_data[key]
library_out['name'] = _name
library_out['description'] = _description
return library_out
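# Example of the default-name branch (illustrative; assumes
# DEFAULT_LIBRARY_NAME_PREFIX == 'Untitled Library'): a user who already
# owns 'Untitled Library 1' and 'Untitled Library 2' and posts an empty
# name gets 'Untitled Library 3'.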
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
CORE_LABELS = {
"ARM7TDMI-S": ["ARM7"],
"Cortex-M0" : ["M0", "CORTEX_M"],
"Cortex-M0+": ["M0P", "CORTEX_M"],
"Cortex-M1" : ["M1", "CORTEX_M"],
"Cortex-M3" : ["M3", "CORTEX_M"],
"Cortex-M4" : ["M4", "CORTEX_M"],
"Cortex-M4F" : ["M4", "CORTEX_M"],
"Cortex-M7" : ["M7", "CORTEX_M"],
"Cortex-M7F" : ["M7", "CORTEX_M"],
"Cortex-A9" : ["A9", "CORTEX_A"]
}
import os
import binascii
import struct
import shutil
from workspace_tools.patch import patch
from paths import TOOLS_BOOTLOADERS
class Target:
def __init__(self):
# ARM Core
self.core = None
# Is the disk provided by the interface chip of this board virtual?
self.is_disk_virtual = False
# list of toolchains that are supported by the mbed SDK for this target
self.supported_toolchains = None
# list of extra specific labels
self.extra_labels = []
# list of macros (-D)
self.macros = []
# Default online compiler:
self.default_toolchain = "ARM"
self.name = self.__class__.__name__
# Code used to determine the device's platform.
# This code is the prefix of the URL link provided in mbed.htm (on the mbed disk).
self.detect_code = []
def program_cycle_s(self):
return 4 if self.is_disk_virtual else 1.5
def get_labels(self):
return [self.name] + CORE_LABELS[self.core] + self.extra_labels
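# For example, for the LPC1768 target defined below this returns:
# ['LPC1768', 'M3', 'CORTEX_M', 'NXP', 'LPC176X', 'MBED_LPC1768']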
def init_hooks(self, hook, toolchain_name):
pass
### MCU Support ###
class CM4_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class CM4F_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4F_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
### NXP ###
# This class implements the post-link patching step needed by LPC targets
class LPCTarget(Target):
def __init__(self):
Target.__init__(self)
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("post", self.lpc_patch)
@staticmethod
def lpc_patch(t_self, resources, elf, binf):
t_self.debug("LPC Patch: %s" % os.path.split(binf)[1])
patch(binf)
class LPC11C24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11CXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC1114(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U24_401']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["1040"]
class OC_MBUINO(LPC11U24):
def __init__(self):
LPC11U24.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.macros = ['TARGET_LPC11U24']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24_301(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC11U34_421(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class MICRONFCBOARD(LPC11U34_421):
def __init__(self):
LPC11U34_421.__init__(self)
self.macros = ['LPC11U34_421', 'APPNEARME_MICRONFCBOARD']
self.extra_labels = ['NXP', 'LPC11UXX', 'APPNEARME_MICRONFCBOARD']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class LPC11U35_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501_IBDAP(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class XADOW_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_Y5_MBUG(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U37_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPCCAPPUCCINO(LPC11U37_501):
def __init__(self):
LPC11U37_501.__init__(self)
class ARCH_GPRS(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U37_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class LPC11U68(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC11U6X']
self.supported_toolchains = ["ARM", "uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1168"]
class LPC1347(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC13XX']
self.supported_toolchains = ["ARM", "GCC_ARM","IAR"]
class LPC1549(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC15XX']
self.supported_toolchains = ["uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1549"]
class LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'MBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.detect_code = ["1010"]
class ARCH_PRO(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class UBLOX_C027(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class XBED_LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'XBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.detect_code = ["1010"]
class LPC2368(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC23XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "GCC_CR"]
class LPC2460(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC2460']
self.supported_toolchains = ["GCC_ARM"]
class LPC810(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC812(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["1050"]
class LPC824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM","GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class SSCI824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC4088(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC408X']
self.supported_toolchains = ["ARM", "GCC_CR", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
if not os.path.isdir(binf):
# Regular binary file, nothing to do
LPCTarget.lpc_patch(t_self, resources, elf, binf)
return
outbin = open(binf + ".temp", "wb")
partf = open(os.path.join(binf, "ER_IROM1"), "rb")
# Pad the first part (internal flash) with 0xFF to 512k
data = partf.read()
outbin.write(data)
outbin.write('\xFF' * (512*1024 - len(data)))
partf.close()
# Read and append the second part (external flash) in chunks of fixed size
chunksize = 128 * 1024
partf = open(os.path.join(binf, "ER_IROM2"), "rb")
while True:
data = partf.read(chunksize)
outbin.write(data)
if len(data) < chunksize:
break
partf.close()
outbin.close()
# Remove the directory with the binary parts and rename the temporary
# file to 'binf'
shutil.rmtree(binf, True)
os.rename(binf + '.temp', binf)
t_self.debug("Generated custom binary file (internal flash + SPIFI)")
LPCTarget.lpc_patch(t_self, resources, elf, binf)
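# Resulting file layout (illustrative):
#   ER_IROM1 (internal flash) padded with 0xFF up to 512 KiB,
#   followed by ER_IROM2 (external SPIFI flash) appended verbatim.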
class LPC4088_DM(LPC4088):
pass
class LPC4330_M4(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR", "GCC_ARM"]
class LPC4330_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC4337(LPCTarget):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4337']
self.supported_toolchains = ["ARM"]
class LPC1800(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC43XX']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC11U37H_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
### Freescale ###
class KL05Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL25Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "GCC_CW_EWL", "GCC_CW_NEWLIB", "GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0200"]
class KL26Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM","GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL43Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL46Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0220"]
class K20D50M(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
class TEENSY3_1(Target):
OUTPUT_EXT = 'hex'
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX', 'K20DX256']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset = 0)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
class K22F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE']
self.macros = ["CPU_MK22FN512VLH12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0201"]
class K64F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F', 'FRDM']
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
self.detect_code = ["0240"]
class MTS_GAMBIT(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED", "TARGET_K64F"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
### STMicro ###
class NUCLEO_F030R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F030R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0725"]
class NUCLEO_F070RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F070RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0755"]
class NUCLEO_F072RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F072RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0730"]
class NUCLEO_F091RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F091RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0750"]
class NUCLEO_F103RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F103RB']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0700"]
class NUCLEO_F302R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F302R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0705"]
class NUCLEO_F303RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0745"]
class NUCLEO_F334R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0735"]
class NUCLEO_F401RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0720"]
class NUCLEO_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0740"]
class ELMO_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["----"]
class NUCLEO_F446RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F446RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0777"]
class NUCLEO_L053R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053R8']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0715"]
class NUCLEO_L073RZ(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L073RZ']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0760"]
class NUCLEO_L152RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0710"]
class NUCLEO_L476RG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32L4', 'STM32L476RG']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0765"]
class STM32F3XX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['STM', 'STM32F3XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class STM32F407(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F4XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
class ARCH_MAX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.macros = ['LSI_VALUE=32000']
def program_cycle_s(self):
return 2
class DISCO_F051R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F051', 'STM32F051R8']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F100RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F100RB']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F303VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303', 'STM32F303VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F334C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["0810"]
class DISCO_F407VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
class DISCO_F429ZI(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F429', 'STM32F429ZI']
self.supported_toolchains = ["GCC_ARM", "IAR"]
self.default_toolchain = "GCC_ARM"
class DISCO_L053C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F746NG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M7"
self.extra_labels = ['STM', 'STM32F7', 'STM32F746', 'STM32F746NG']
self.supported_toolchains = ["ARM", "uARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["0815"]
class DISCO_L476VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32L4', 'STM32L476VG']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["0820"]
class MTS_MDOT_F405RG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F405RG']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=48000000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
class MTS_MDOT_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=96000000', 'USE_PLL_HSE_EXTC=0', 'VECT_TAB_OFFSET=0x00010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
# combine application binary with bootloader
# bootloader + padding to 64kB + application + CRC32 (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_MDOT_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
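# Resulting image layout (illustrative):
#   0x00000  bootloader.bin
#   ...      0xFF padding up to 64 KiB
#   0x10000  application binary
#   end      4-byte little-endian CRC32 of everything above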
class MTS_DRAGONFLY_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'VECT_TAB_OFFSET=0x08010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
# combine application binary with bootloader
# bootloader + padding to 64kB + application + CRC32 (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_DRAGONFLY_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
class MOTE_L152RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["4100"]
class DISCO_F401VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401', 'STM32F401VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
class UBLOX_C029(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F439', 'STM32F439ZI']
self.macros = ['HSE_VALUE=24000000', 'HSE_STARTUP_TIMEOUT=5000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class NZ32SC151(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L151RC']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
# After flashing the device, how long to delay until we assume the program is running
def program_cycle_s(self):
return 1.5
### Nordic ###
class MCU_NRF51(Target):
# the following is a list of possible Nordic softdevices in decreasing order
# of preference.
EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's130_nrf51_1.0.0_softdevice.hex',
'boot' : 's130_nrf51_1.0.0_bootloader.hex',
'offset' : 0x1C000
},
{
'name' : 's110_nrf51822_8.0.0_softdevice.hex',
'boot' : 's110_nrf51822_8.0.0_bootloader.hex',
'offset' : 0x18000
},
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'boot' : 's110_nrf51822_7.1.0_bootloader.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_7.0.0_softdevice.hex',
'boot' : 's110_nrf51822_7.0.0_bootloader.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_6.0.0_softdevice.hex',
'boot' : 's110_nrf51822_6.0.0_bootloader.hex',
'offset' : 0x14000
}
]
OVERRIDE_BOOTLOADER_FILENAME = "nrf51822_bootloader.hex"
OUTPUT_EXT = 'hex'
MERGE_SOFT_DEVICE = True
MERGE_BOOTLOADER = False
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "MCU_NRF51", "MCU_NRF51822"]
self.macros = ['NRF51', 'TARGET_NRF51822']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["1070"]
def program_cycle_s(self):
return 6
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM', 'IAR']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
# Scan to find the actual path of the SoftDevice hex file
sdf = None
for softdeviceAndOffsetEntry in t_self.target.EXPECTED_SOFTDEVICES_WITH_OFFSETS:
for hexf in resources.hex_files:
if hexf.find(softdeviceAndOffsetEntry['name']) != -1:
t_self.debug("SoftDevice file found %s." % softdeviceAndOffsetEntry['name'])
sdf = hexf
if sdf is not None: break
if sdf is not None: break
if sdf is None:
t_self.debug("Hex file not found. Aborting.")
return
# Look for bootloader file that matches this soft device or bootloader override image
blf = None
if t_self.target.MERGE_BOOTLOADER is True:
for hexf in resources.hex_files:
if hexf.find(t_self.target.OVERRIDE_BOOTLOADER_FILENAME) != -1:
t_self.debug("Bootloader file found %s." % t_self.target.OVERRIDE_BOOTLOADER_FILENAME)
blf = hexf
break
elif hexf.find(softdeviceAndOffsetEntry['boot']) != -1:
t_self.debug("Bootloader file found %s." % softdeviceAndOffsetEntry['boot'])
blf = hexf
break
# Merge user code with softdevice
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset=softdeviceAndOffsetEntry['offset'])
if t_self.target.MERGE_SOFT_DEVICE is True:
t_self.debug("Merge SoftDevice file %s" % softdeviceAndOffsetEntry['name'])
sdh = IntelHex(sdf)
binh.merge(sdh)
if t_self.target.MERGE_BOOTLOADER is True and blf is not None:
t_self.debug("Merge BootLoader file %s" % blf)
blh = IntelHex(blf)
binh.merge(blh)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
# 16KB Nordic targets are tight on SRAM using S130 (default) so we
# introduce two possible options:
# 1) Use S130 (default) - for this derive from MCU_NRF51_16K
# 2) Use S110 - for this derive from MCU_NRF51_16K_S110
# Note that the 'default' option will track the default choice
# for other Nordic targets, and so can take advantage of other
# future SoftDevice improvements
# The *_BASE targets should *not* be inherited from, as they do not
# specify enough for building a target
# 16KB MCU version, e.g. Nordic nRF51822, Seeed Arch BLE, etc.
class MCU_NRF51_16K_BASE(MCU_NRF51):
def __init__(self):
MCU_NRF51.__init__(self)
self.extra_labels += ['MCU_NORDIC_16K', 'MCU_NRF51_16K']
self.macros += ['TARGET_MCU_NORDIC_16K', 'TARGET_MCU_NRF51_16K']
# derivative class used to create softdevice+bootloader enabled images
class MCU_NRF51_16K_BOOT_BASE(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_BOOT']
self.macros += ['TARGET_MCU_NRF51_16K_BOOT', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
# derivative class used to create program only images for use with FOTA
class MCU_NRF51_16K_OTA_BASE(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_OTA']
self.macros += ['TARGET_MCU_NRF51_16K_OTA', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = False
class MCU_NRF51_16K(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_S110(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
class MCU_NRF51_16K_BOOT(MCU_NRF51_16K_BOOT_BASE):
def __init__(self):
MCU_NRF51_16K_BOOT_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_BOOT_S110(MCU_NRF51_16K_BOOT_BASE):
def __init__(self):
MCU_NRF51_16K_BOOT_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
class MCU_NRF51_16K_OTA(MCU_NRF51_16K_OTA_BASE):
def __init__(self):
MCU_NRF51_16K_OTA_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_OTA_S110(MCU_NRF51_16K_OTA_BASE):
def __init__(self):
MCU_NRF51_16K_OTA_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
# 32KB MCU version, e.g. Nordic nRF51-DK, nRF51-Dongle, etc.
class MCU_NRF51_32K(MCU_NRF51):
def __init__(self):
MCU_NRF51.__init__(self)
self.extra_labels += ['MCU_NORDIC_32K', 'MCU_NRF51_32K']
self.macros += ['TARGET_MCU_NORDIC_32K', 'TARGET_MCU_NRF51_32K']
class MCU_NRF51_32K_BOOT(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.extra_labels += ['MCU_NRF51_32K_BOOT']
self.macros += ['TARGET_MCU_NRF51_32K_BOOT', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class MCU_NRF51_32K_OTA(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.extra_labels += ['MCU_NRF51_32K_OTA']
self.macros += ['TARGET_MCU_NRF51_32K_OTA', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = False
#
# nRF51 based development kits
#
# This one is special for legacy reasons
class NRF51822(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class NRF51822_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class NRF51822_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class ARCH_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.supported_form_factors = ["ARDUINO"]
class ARCH_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['ARCH_BLE', 'ARCH_LINK']
self.macros += ['TARGET_ARCH_BLE', 'TARGET_ARCH_LINK']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['ARCH_BLE', 'ARCH_LINK']
self.macros += ['TARGET_ARCH_BLE', 'TARGET_ARCH_LINK']
self.supported_form_factors = ["ARDUINO"]
class SEEED_TINY_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class SEEED_TINY_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['SEEED_TINY_BLE']
self.macros += ['TARGET_SEEED_TINY_BLE']
class SEEED_TINY_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['SEEED_TINY_BLE']
self.macros += ['TARGET_SEEED_TINY_BLE']
class HRM1017(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.macros += ['TARGET_NRF_LFCLK_RC']
class HRM1017_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['HRM1017']
self.macros += ['TARGET_HRM1017', 'TARGET_NRF_LFCLK_RC']
class HRM1017_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['HRM1017']
self.macros += ['TARGET_HRM1017', 'TARGET_NRF_LFCLK_RC']
class RBLAB_NRF51822(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.supported_form_factors = ["ARDUINO"]
class RBLAB_NRF51822_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['RBLAB_NRF51822']
self.macros += ['TARGET_RBLAB_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class RBLAB_NRF51822_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['RBLAB_NRF51822']
self.macros += ['TARGET_RBLAB_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class RBLAB_BLENANO(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class RBLAB_BLENANO_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['RBLAB_BLENANO']
self.macros += ['TARGET_RBLAB_BLENANO']
class RBLAB_BLENANO_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['RBLAB_BLENANO']
self.macros += ['TARGET_RBLAB_BLENANO']
class NRF51822_Y5_MBUG(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class WALLBOT_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class WALLBOT_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['WALLBOT_BLE']
self.macros += ['TARGET_WALLBOT_BLE']
class WALLBOT_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['WALLBOT_BLE']
self.macros += ['TARGET_WALLBOT_BLE']
class DELTA_DFCM_NNN40(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros += ['TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class DELTA_DFCM_NNN40_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.extra_labels += ['DELTA_DFCM_NNN40']
self.macros += ['TARGET_DELTA_DFCM_NNN40', 'TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class DELTA_DFCM_NNN40_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.extra_labels += ['DELTA_DFCM_NNN40']
self.macros += ['TARGET_DELTA_DFCM_NNN40', 'TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class NRF51_DK(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.extra_labels = ['NRF51_DK']
self.macros += ['TARGET_NRF51_DK']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.extra_labels = ['NRF51_DK']
self.macros += ['TARGET_NRF51_DK']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DONGLE(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
class NRF51_DONGLE_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.extra_labels = ['NRF51_DONGLE']
self.macros += ['TARGET_NRF51_DONGLE']
class NRF51_DONGLE_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.extra_labels = ['NRF51_DONGLE']
self.macros += ['TARGET_NRF51_DONGLE']
class NRF51_MICROBIT(MCU_NRF51_16K_S110):
def __init__(self):
MCU_NRF51_16K_S110.__init__(self)
self.EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's110_nrf51822_8.0.0_softdevice.hex',
'boot' : 's110_nrf51822_8.0.0_bootloader.hex',
'offset' : 0x18000
},
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'boot' : 's110_nrf51822_7.1.0_bootloader.hex',
'offset' : 0x16000
}
]
self.macros += ['TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_BOOT(MCU_NRF51_16K_BOOT_S110):
def __init__(self):
MCU_NRF51_16K_BOOT_S110.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_OTA(MCU_NRF51_16K_OTA_S110):
def __init__(self):
MCU_NRF51_16K_OTA_S110.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
### ARM ###
class ARM_MPS2_Target(Target):
def __init__(self):
Target.__init__(self)
class ARM_MPS2_M0(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0']
self.macros = ['CMSDK_CM0']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M0P(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0P']
self.macros = ['CMSDK_CM0plus']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M1(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M1"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M1']
self.macros = ['CMSDK_CM1']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M3(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M3']
self.macros = ['CMSDK_CM3']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M4(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M4']
self.macros = ['CMSDK_CM4']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M7(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M7']
self.macros = ['CMSDK_CM7']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_BEID(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_BEID']
self.macros = ['CMSDK_BEID']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2(ARM_MPS2_M4):
pass
### Renesas ###
class RZ_A1H(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-A9"
self.extra_labels = ['RENESAS', 'MBRZA1H']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.default_toolchain = "ARM"
def program_cycle_s(self):
return 2
### Maxim Integrated ###
class MAXWSNENV(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32610']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
class MAX32600MBED(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32600']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
### Silicon Labs ###
class EFM32GG_STK3700(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32GG990F1024']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32LG_STK3600(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32LG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32WG_STK3800(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32WG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32ZG_STK3200(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32ZG222F32']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
class EFM32HG_STK3400(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32HG322F64']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
### WIZnet ###
class WIZWIKI_W7500(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['WIZNET', 'W7500x', 'WIZwiki_W7500']
self.supported_toolchains = ["uARM", "ARM"]
self.default_toolchain = "ARM"
self.supported_form_factors = ["ARDUINO"]
### Atmel ###
class SAMR21G18A(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Atmel', 'SAM_CortexM0+', 'SAMR21']
self.macros = ['__SAMR21G18A__', 'I2C_MASTER_CALLBACK_MODE=true', 'EXTINT_CALLBACK_MODE=true', 'USART_CALLBACK_MODE=true', 'TC_ASYNC=true']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
class SAMD21J18A(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Atmel', 'SAM_CortexM0+', 'SAMD21']
self.macros = ['__SAMD21J18A__', 'I2C_MASTER_CALLBACK_MODE=true', 'EXTINT_CALLBACK_MODE=true', 'USART_CALLBACK_MODE=true', 'TC_ASYNC=true']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
# Get a single instance for each target
TARGETS = [
### NXP ###
LPC11C24(),
LPC11U24(),
OC_MBUINO(), # LPC11U24
LPC11U24_301(),
LPC11U34_421(),
MICRONFCBOARD(), # LPC11U34_421
LPC11U35_401(),
LPC11U35_501(), # LPC11U35_501
LPC11U35_501_IBDAP(), # LPC11U35_501
XADOW_M0(), # LPC11U35_501
LPC11U35_Y5_MBUG(), # LPC11U35_501
LPC11U37_501(),
LPCCAPPUCCINO(), # LPC11U37_501
ARCH_GPRS(), # LPC11U37_501
LPC11U68(),
LPC1114(),
LPC1347(),
LPC1549(),
LPC1768(), # LPC1768
ARCH_PRO(), # LPC1768
UBLOX_C027(), # LPC1768
XBED_LPC1768(), # LPC1768
LPC2368(),
LPC2460(),
LPC810(),
LPC812(),
LPC824(),
SSCI824(), # LPC824
LPC4088(),
LPC4088_DM(),
LPC4330_M4(),
LPC4330_M0(),
LPC4337(),
LPC11U37H_401(),
### Freescale ###
KL05Z(),
KL25Z(),
KL26Z(),
KL43Z(),
KL46Z(),
K20D50M(),
TEENSY3_1(),
K22F(),
K64F(),
MTS_GAMBIT(), # FRDM K64F
### STMicro ###
NUCLEO_F030R8(),
NUCLEO_F070RB(),
NUCLEO_F072RB(),
NUCLEO_F091RC(),
NUCLEO_F103RB(),
NUCLEO_F302R8(),
NUCLEO_F303RE(),
NUCLEO_F334R8(),
NUCLEO_F401RE(),
NUCLEO_F411RE(),
ELMO_F411RE(),
NUCLEO_F446RE(),
NUCLEO_L053R8(),
NUCLEO_L073RZ(),
NUCLEO_L152RE(),
NUCLEO_L476RG(),
STM32F3XX(),
STM32F407(),
DISCO_F051R8(),
DISCO_F100RB(),
DISCO_F303VC(),
DISCO_F334C8(),
DISCO_F746NG(),
DISCO_F407VG(), # STM32F407
ARCH_MAX(), # STM32F407
DISCO_F429ZI(),
DISCO_L053C8(),
DISCO_L476VG(),
MTS_MDOT_F405RG(),
MTS_MDOT_F411RE(),
MOTE_L152RC(),
MTS_DRAGONFLY_F411RE(),
DISCO_F401VC(),
UBLOX_C029(), # STM32F439
NZ32SC151(), # STM32L151
### Nordic ###
NRF51822(), # nRF51_16K
NRF51822_BOOT(), # nRF51_16K
NRF51822_OTA(), # nRF51_16K
ARCH_BLE(), # nRF51_16K
ARCH_BLE_BOOT(), # nRF51_16K
ARCH_BLE_OTA(), # nRF51_16K
ARCH_LINK(), # nRF51_16K
ARCH_LINK_BOOT(), # nRF51_16K
ARCH_LINK_OTA(), # nRF51_16K
SEEED_TINY_BLE(), # nRF51_16K
SEEED_TINY_BLE_BOOT(), # nRF51_16K
SEEED_TINY_BLE_OTA(), # nRF51_16K
HRM1017(), # nRF51_16K
HRM1017_BOOT(), # nRF51_16K
HRM1017_OTA(), # nRF51_16K
RBLAB_NRF51822(), # nRF51_16K
RBLAB_NRF51822_BOOT(), # nRF51_16K
RBLAB_NRF51822_OTA(), # nRF51_16K
RBLAB_BLENANO(), # nRF51_16K
RBLAB_BLENANO_BOOT(), # nRF51_16K
RBLAB_BLENANO_OTA(), # nRF51_16K
NRF51822_Y5_MBUG(), # nRF51_16K
WALLBOT_BLE(), # nRF51_16K
WALLBOT_BLE_BOOT(), # nRF51_16K
WALLBOT_BLE_OTA(), # nRF51_16K
    DELTA_DFCM_NNN40(),     # nRF51_32K
    DELTA_DFCM_NNN40_BOOT(),# nRF51_32K
    DELTA_DFCM_NNN40_OTA(), # nRF51_32K
NRF51_DK(), # nRF51_32K
NRF51_DK_BOOT(), # nRF51_32K
NRF51_DK_OTA(), # nRF51_32K
NRF51_DONGLE(), # nRF51_32K
NRF51_DONGLE_BOOT(), # nRF51_32K
NRF51_DONGLE_OTA(), # nRF51_32K
NRF51_MICROBIT(), # nRF51_16K - S110
NRF51_MICROBIT_B(), # nRF51_16K - default
### ARM ###
ARM_MPS2_M0(),
ARM_MPS2_M0P(),
ARM_MPS2_M1(),
ARM_MPS2_M3(),
ARM_MPS2_M4(),
ARM_MPS2_M7(),
ARM_MPS2_BEID(),
ARM_MPS2(),
### Renesas ###
RZ_A1H(),
### Maxim Integrated ###
MAXWSNENV(),
MAX32600MBED(),
### Silicon Labs ###
EFM32GG_STK3700(),
EFM32LG_STK3600(),
EFM32WG_STK3800(),
EFM32ZG_STK3200(),
EFM32HG_STK3400(),
### WIZnet ###
WIZWIKI_W7500(),
### Atmel ###
SAMR21G18A(),
SAMD21J18A(),
]
# Map each target name to its unique instance
TARGET_MAP = {}
for t in TARGETS:
TARGET_MAP[t.name] = t
TARGET_NAMES = TARGET_MAP.keys()
# Some targets with different names have the same exporters
EXPORT_MAP = { }
# Detection APIs
def get_target_detect_codes():
""" Returns dictionary mapping detect_code -> platform_name
"""
result = {}
for target in TARGETS:
for detect_code in target.detect_code:
result[detect_code] = target.name
return result
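# A minimal usage sketch (not part of the upstream file; 'K64F' and the
# printed fields are illustrative assumptions) showing how the maps above
# are typically consumed.
if __name__ == '__main__':
    detect_map = get_target_detect_codes()   # detect_code -> platform_name
    print('%d detect codes known' % len(detect_map))
    k64f = TARGET_MAP.get('K64F')            # look up a target instance by name
    if k64f is not None:
        print('%s core: %s' % (k64f.name, k64f.core))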
|
|
# -*- coding: utf-8 -*-
"""
Module that creates a wrapper around LLVM functions. The wrapper is callable
from Python.
"""
from __future__ import print_function, division, absolute_import
import logging
logger = logging.getLogger(__name__)
import ast
import ctypes
import llvm.core
from numba import *
from numba import nodes
from numba import closures
from numba import typesystem
from numba import numbawrapper
from numba.functions import keep_alive
from numba.symtab import Variable
from numba.typesystem import is_obj
#------------------------------------------------------------------------
# Create a NumbaFunction (numbafunction.c)
#------------------------------------------------------------------------
def _create_methoddef(py_func, func_name, func_doc, func_pointer):
"""
Create a PyMethodDef ctypes struct.
struct PyMethodDef {
const char *ml_name; /* The name of the built-in function/method */
PyCFunction ml_meth; /* The C function that implements it */
int ml_flags; /* Combination of METH_xxx flags, which mostly
describe the args expected by the C func */
const char *ml_doc; /* The __doc__ attribute, or NULL */
};
"""
PyMethodDef = struct([('name', c_string_type),
('method', void.pointer()),
('flags', int_),
('doc', c_string_type)])
c_PyMethodDef = PyMethodDef.to_ctypes()
PyCFunction_NewEx = ctypes.pythonapi.PyCFunction_NewEx
PyCFunction_NewEx.argtypes = [ctypes.POINTER(c_PyMethodDef),
ctypes.py_object,
ctypes.c_void_p]
PyCFunction_NewEx.restype = ctypes.py_object
# It is paramount to put these into variables first, since every
# access may return a new string object!
keep_alive(py_func, func_name)
keep_alive(py_func, func_doc)
methoddef = c_PyMethodDef()
if PY3:
if func_name is not None:
func_name = func_name.encode('utf-8')
if func_doc is not None:
func_doc = func_doc.encode('utf-8')
methoddef.name = func_name
methoddef.doc = func_doc
methoddef.method = ctypes.c_void_p(func_pointer)
methoddef.flags = 1 # METH_VARARGS
return methoddef
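# For reference, a standalone ctypes sketch (an illustration, not used by the
# code above) of the same CPython struct that _create_methoddef builds through
# numba's type system; the field names mirror the C declaration in its docstring.
class _PyMethodDefSketch(ctypes.Structure):
    _fields_ = [('ml_name', ctypes.c_char_p),   # const char *ml_name
                ('ml_meth', ctypes.c_void_p),   # PyCFunction ml_meth
                ('ml_flags', ctypes.c_int),     # METH_xxx flags
                ('ml_doc', ctypes.c_char_p)]    # const char *ml_doc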
def numbafunction_new(py_func, func_name, func_doc, module_name, func_pointer,
wrapped_lfunc_pointer, wrapped_signature):
"Create a NumbaFunction (numbafunction.c)"
methoddef = _create_methoddef(py_func, func_name, func_doc, func_pointer)
keep_alive(py_func, methoddef)
keep_alive(py_func, module_name)
wrapper = numbawrapper.create_function(methoddef, py_func,
wrapped_lfunc_pointer,
wrapped_signature, module_name)
return methoddef, wrapper
#------------------------------------------------------------------------
# Ctypes wrapping
#------------------------------------------------------------------------
def get_ctypes_func(self, llvm=True):
import ctypes
sig = self.func_signature
restype = typesystem.convert_to_ctypes(sig.return_type)
# FIXME: Switch to PYFUNCTYPE so it does not release the GIL.
#
# prototype = ctypes.CFUNCTYPE(restype,
# *[_types.convert_to_ctypes(x)
# for x in sig.args])
prototype = ctypes.PYFUNCTYPE(restype,
*[typesystem.convert_to_ctypes(x)
for x in sig.args])
if hasattr(restype, 'make_ctypes_prototype_wrapper'):
# See numba.utils.ComplexMixin for an example of
# make_ctypes_prototype_wrapper().
prototype = restype.make_ctypes_prototype_wrapper(prototype)
if llvm:
        # July 10, 2012: PY_CALL_TO_LLVM_CALL_MAP was removed in a recent commit.
#
# PY_CALL_TO_LLVM_CALL_MAP[self.func] = \
# self.build_call_to_translated_function
return prototype(self.lfunc_pointer)
else:
return prototype(self.func)
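# Background for the FIXME above (informal note): a ctypes.CFUNCTYPE prototype
# releases the GIL around the foreign call, while ctypes.PYFUNCTYPE keeps it
# held, which matters when the callee may touch Python objects, e.g.:
#     gil_released = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
#     gil_held = ctypes.PYFUNCTYPE(ctypes.c_double, ctypes.c_double)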
#------------------------------------------------------------------------
# NumbaFunction Wrapping
#------------------------------------------------------------------------
def fake_pyfunc(self, args):
"PyObject *(*)(PyObject *self, PyObject *args)"
pass
def get_closure_scope(func_signature, func_obj):
"""
Retrieve the closure from the NumbaFunction from the func_closure
attribute.
func_signature:
signature of closure function
func_obj:
LLVM Value referencing the closure function as a Python object
"""
closure_scope_type = func_signature.args[0]
offset = numbawrapper.numbafunc_closure_field_offset
closure = nodes.LLVMValueRefNode(void.pointer(), func_obj)
closure = nodes.CoercionNode(closure, char.pointer())
closure_field = nodes.pointer_add(closure, nodes.const(offset, size_t))
closure_field = nodes.CoercionNode(closure_field,
closure_scope_type.pointer())
closure_scope = nodes.DereferenceNode(closure_field)
return closure_scope
def build_wrapper_function_ast(env, wrapper_lfunc, llvm_module):
"""
Build AST for LLVM function wrapper.
lfunc: LLVM function to wrap
llvm_module: module the wrapper is being defined in
The resulting AST has a NativeCallNode to the wrapped function. The
arguments are LLVMValueRefNode nodes which still need their llvm_value
set to the object from the tuple. This happens in visit_FunctionWrapperNode
during codegen.
"""
func = env.crnt.func
func_signature = env.crnt.func_signature
func_name = env.crnt.func_name
# Insert external declaration
lfunc = llvm_module.get_or_insert_function(
func_signature.to_llvm(env.context),
env.crnt.lfunc.name)
# Build AST
wrapper = nodes.FunctionWrapperNode(lfunc,
func_signature,
func,
fake_pyfunc,
func_name)
error_return = ast.Return(nodes.CoercionNode(nodes.NULL_obj,
object_))
is_closure = bool(closures.is_closure_signature(func_signature))
nargs = len(func_signature.args) - is_closure
# Call wrapped function with unpacked object arguments
# (delay actual arguments)
args = [nodes.LLVMValueRefNode(object_, None)
for i in range(nargs)]
if is_closure:
# Insert m_self as scope argument type
closure_scope = get_closure_scope(func_signature, wrapper_lfunc.args[0])
args.insert(0, closure_scope)
func_call = nodes.NativeCallNode(func_signature, args, lfunc)
if not is_obj(func_signature.return_type):
# Check for error using PyErr_Occurred()
func_call = nodes.PyErr_OccurredNode(func_call)
# Coerce and return result
if func_signature.return_type.is_void:
wrapper.body = func_call
result_node = nodes.ObjectInjectNode(None)
else:
wrapper.body = None
result_node = func_call
wrapper.return_result = ast.Return(value=nodes.CoercionNode(result_node,
object_))
# Update wrapper
wrapper.error_return = error_return
wrapper.cellvars = []
wrapper.wrapped_nargs = nargs
wrapper.wrapped_args = args[is_closure:]
return wrapper
def build_wrapper_translation(env, llvm_module=None):
"""
Generate a wrapper function in the given llvm module.
"""
from numba import pipeline
if llvm_module:
wrapper_module = llvm_module
else:
wrapper_module = env.llvm_context.module
# Create wrapper code generator and wrapper AST
func_name = '__numba_wrapper_%s' % env.crnt.func_name
signature = object_(void.pointer(), object_)
symtab = dict(self=Variable(object_, is_local=True),
args=Variable(object_, is_local=True))
func_env = env.crnt.inherit(
func=fake_pyfunc,
name=func_name,
mangled_name=None, # Force FunctionEnvironment.init()
# to generate a new mangled name.
func_signature=signature,
locals={},
symtab=symtab,
refcount_args=False,
llvm_module=wrapper_module)
# Create wrapper LLVM function
func_env.lfunc = pipeline.get_lfunc(env, func_env)
# Build wrapper ast
wrapper_node = build_wrapper_function_ast(env,
wrapper_lfunc=func_env.lfunc,
llvm_module=wrapper_module)
func_env.ast = wrapper_node
# Specialize and compile wrapper
pipeline.run_env(env, func_env, pipeline_name='late_translate')
keep_alive(fake_pyfunc, func_env.lfunc)
return func_env.translator # TODO: Amend callers to eat func_env
def build_wrapper_function(env):
'''
Build a wrapper function for the currently translated function.
Return the interpreter-level wrapper function, the LLVM wrapper function,
and the method definition record.
'''
t = build_wrapper_translation(env)
# Return a PyCFunctionObject holding the wrapper
func_pointer = t.lfunc_pointer
methoddef, wrapper = numbafunction_new(
env.crnt.func,
env.crnt.func_name,
env.crnt.func_doc,
env.crnt.translator.module_name,
func_pointer, # Wrapper
env.crnt.translator.lfunc_pointer, # Wrapped
env.crnt.func_signature)
return wrapper, t.lfunc, methoddef
def build_wrapper_module(env):
'''
Build a wrapper function for the currently translated
function, and return a tuple containing the separate LLVM
module, and the LLVM wrapper function.
'''
llvm_module = llvm.core.Module.new('%s_wrapper_module' % env.crnt.mangled_name)
t = build_wrapper_translation(env, llvm_module=llvm_module)
logger.debug('Wrapper module: %s' % llvm_module)
return llvm_module, t.lfunc
|
|
# ===============================================================================
# Copyright 2019 Jan Hendrickx and Gabriel Parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import numpy as np
from datetime import date, datetime
import yaml
import scandir
import time
# ============= standard library imports ========================
"""
In this script we grab all the .npy arrays generated in the stochastic grid search ETRM run. They are formatted as
ETRM_daily_eta_taw_150_seed_0_2000_1_1.npy where the last 3 are year month day. The numpy arrays of different seeds with
matching TAWs need to be averaged together to get the stochastic average. Then the stochastic average numpy arrays are
output with the format ETRM_daily_eta_taw_150_2000_1_1.npy, note the seed value will no longer be in there. This must
be done for each kind of daily output that has been generated by the ETRM
"""
def list_output(taw_list, taw_sorted, output_root, outname):
"""
:param taw_list:
:param rzsm_taw_sorted:
:param output_root:
:param outname:
:return:
"""
print 'outname is {}'.format(outname)
for t, tlist in zip(taw_list, taw_sorted):
print 'saving t {} to file'.format(t)
save_start = time.time()
with open(os.path.join(output_root, outname.format(t)), 'w') as wfile:
# firstseed = tlist[0]
# secondseed = tlist[1]
# thirdseed = tlist[2]
# fourthseed = tlist[3]
# fifthseed = tlist[4]
# sixthseed = tlist[5]
seeds = tlist[:6]
            header = ','.join(['seed{}file,seed{}val,seed{}date'.format(i, i, i) for i in range(len(seeds))])
wfile.write('{}\n'.format(header))
# wfile.write('seed0file,seed0val,seed0date,seed1file,seed1val,seed1date,seed2file,seed2val,seed2date,'
# 'seed3file,seed3val,seed3date,seed4file,seed4val,seed4date,seed5file,seed5val,seed5date\n')
ost = time.time()
nseed = len(seeds[0])
adur = 0
for j, paths in enumerate(zip(*seeds)):
# print(j, len(paths))
# for j, paths in enumerate(tlist):
# for one, two, three, four, five, six in zip(firstseed, secondseed, thirdseed, fourthseed, fifthseed,
# sixthseed):
# one_arr = np.load(one[0])
# one_val = one_arr[1, 1]
# two_arr = np.load(two[0])
# two_val = two_arr[1, 1]
# three_arr = np.load(three[0])
# three_val = three_arr[1, 1]
# four_arr = np.load(four[0])
# four_val = four_arr[1, 1]
# five_arr = np.load(five[0])
# five_val = five_arr[1, 1]
# six_arr = np.load(six[0])
# six_val = six_arr[1, 1]
# row = []
# for path, date_val in paths:
# arr = np.load(path)
# row.extend((path, arr[1, 1], date_val))
st = time.time()
                row = [i for p, d in paths for i in (os.path.basename(p), str(np.load(p)[1, 1]), d.isoformat())]
row = ','.join(row)
wfile.write('{}\n'.format(row))
dur = time.time()-st
# # print('writing row {}, et={}, adur={},'
# 'timeleft={} t2={}'.format(j, dur, adur,
# adur*nseed-(time.time()-ost),
# adur*(nseed -j)
# ))
adur = (dur+adur)/2.
# wfile.write('{},{},{},'.format(one[0], one_val, one[1]))
#
# wfile.write('{},{},{},'.format(two[0], two_val, two[1]))
#
# wfile.write('{},{},{},'.format(three[0], three_val, three[1]))
#
# wfile.write('{},{},{},'.format(four[0], four_val, four[1]))
#
# wfile.write('{},{},{},'.format(five[0], five_val, five[1]))
#
# wfile.write('{},{},{}\n'.format(six[0], six_val, six[1]))
# wfile.write('{},{},{},{},{},{},{},{},{},{},{},{}\n'.format(one[0], one[1], two[0], two[1], three[0],
# three[1], four[0], four[1], five[0], five[1],
# six[0], six[1]))
save_elapsed = (time.time() - save_start)
print 'the elapsed time to write the csv {}'.format(save_elapsed)
def taw_sort(file_date_list, runs, taw_list):
taw_run_list = []
for taw in taw_list:
print 'taw {}'.format(taw)
taw_start = time.time()
taw_lst = []
for run in range(runs):
runlist = []
for f in file_date_list:
fpath, fdate = f
numpy_filename = os.path.split(fpath)[1]
numpy_seed = numpy_filename.split('_')[6]
numpy_taw = numpy_filename.split('_')[4]
if int(numpy_seed) == int(run) and int(numpy_taw) == int(taw):
runlist.append(f)
taw_lst.append(runlist)
taw_run_list.append(taw_lst)
taw_elapsed = (time.time() - taw_start)
print 'taw elapsed {}'.format(taw_elapsed)
return taw_run_list
def make_taw_list(taw_tup):
"""
makes a list of all the taws used
:param taw_tup: (begin_taw, end_taw, taw_step)
:return:
"""
    # unpack
begin_taw, end_taw, taw_step = taw_tup
taw_list = []
for i in range(0, ((end_taw - begin_taw) / taw_step)):
taw = begin_taw + (i * taw_step)
taw_list.append(taw)
return taw_list
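# Worked example: with the __main__ parameters below, make_taw_list((25, 925, 25))
# returns [25, 50, ..., 900] - 36 values; end_taw itself is excluded by the
# range arithmetic.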
def stochastic_filesort(stochastic_file_csv, taw_tup, var_list, model_dates, runs, output_root):
"""
This function will sort the numpy files saved by stochastic_file_finder into directories of time series of a given taw
:param stochastic_file_csv: csv created by stochastic file finder with path to each numpy file for a given model run
formatted as numpypath, date\n
:return:
"""
print 'doing a file sort on the csv created by stochastic file finder'
main_dictionary = {}
taw_list = make_taw_list(taw_tup)
open_read = time.time()
rzsm_lst = []
ro_lst = []
eta_lst = []
infil_lst = []
print 'opening'
with open(stochastic_file_csv, 'r') as rfile:
print 'iterating on lines'
line_start = time.time()
for j, line in enumerate(rfile):
line_item = line.split(',')
numpy_path = line_item[0]
string_date = line_item[1][:-1]
numpy_date = datetime.strptime(string_date, '%Y-%m-%d')
numpy_filename = os.path.split(numpy_path)[1]
# print numpy_filename
# print j, line
if 'rzsm' in numpy_filename:
rzsm_lst.append((numpy_path, numpy_date))
elif 'ro' in numpy_filename:
ro_lst.append((numpy_path, numpy_date))
elif 'eta' in numpy_filename:
eta_lst.append((numpy_path, numpy_date))
elif 'infil' in numpy_filename:
infil_lst.append((numpy_path, numpy_date))
# if j > 1000000:
# break
if not j%10000:
print j
print('file line count {}'.format(j))
line_end = (time.time() - line_start)
print 'line time elapsed {}'.format(line_end)
elapsed = (time.time() - open_read)
print 'time elapsed to parse {}'.format(elapsed)
    # TODO: use sorted(lst, key=...) with a (firstkey, secondkey) key tuple to sort by seed, then TAW
# sorting by a tuple of first, second and third criteria (seed, taw, date)
def keyfunc(x):
return os.path.split(x[0])[1].split('_')[6], os.path.split(x[0])[1].split('_')[4], x[1]
rzsm_lst.sort(key=keyfunc)
ro_lst.sort(key=keyfunc)
eta_lst.sort(key=keyfunc)
infil_lst.sort(key=keyfunc)
print 'starting the taw sort'
sort_start = time.time()
ro_taw_sorted = taw_sort(ro_lst, runs, taw_list)
sort_elapsed = (time.time() - sort_start)
print 'sort elapsed {}'.format(sort_elapsed)
eta_taw_sorted = taw_sort(eta_lst, runs, taw_list)
sort_elapsed = (time.time() - sort_start)
print 'sort elapsed {}'.format(sort_elapsed)
infil_taw_sorted = taw_sort(infil_lst, runs, taw_list)
sort_elapsed = (time.time() - sort_start)
print 'sort elapsed {}'.format(sort_elapsed)
rzsm_taw_sorted = taw_sort(rzsm_lst, runs, taw_list)
sort_elapsed = (time.time() - sort_start)
print 'sort elapsed {}'.format(sort_elapsed)
# outname = '{}.csv'.format()
list_output(taw_list, ro_taw_sorted, output_root, outname='ro_taw_{}.csv')
list_output(taw_list, eta_taw_sorted, output_root, outname='eta_taw_{}.csv')
list_output(taw_list, infil_taw_sorted, output_root, outname='infil_taw_{}.csv')
list_output(taw_list, rzsm_taw_sorted, output_root, outname='rzsm_taw_{}.csv')
# TODO: finish this out so you can extract the value by loading the array and multiplying through each seed by each taw.
def stochastic_file_finder(output_name, base_dir, output_dir, taw_tup, runs, arr_shape, output_vars):
"""
:param output_name:
:param base_dir:
:param output_dir:
:param taw_tup:
:param runs:
:param arr_shape:
:param output_vars:
:return:
"""
    # unpack
begin_taw, end_taw, taw_step = taw_tup
taw_list = []
for i in range(0, ((end_taw - begin_taw) / taw_step)):
taw = begin_taw + (i * taw_step)
taw_list.append(taw)
print 'taw list', taw_list
# # find which params there are
# swhc_dict = {}
#
# for swhc in taw_list:
# print 'swhc from list {}'.format(swhc)
#
# for seed in range(runs):
#
# print 'seed of run {}'.format(seed)
#
# seed_dict = {}
#
# for output_var in output_vars:
#
# print 'output var {}'.format(output_var)
#
# print ' making lists'
#
# lst_dates = []
# lst_files = []
#
# for path, dirs, files in os.walk(base_dir, topdown=False):
#
# if path.endswith('numpy_arrays') and len(files) > 0:
#
# print 'searching'
#
#
# for f in files:
#
# ex_taw = f.split('_')[4]
#
# # ex) infil, eta, ro etc.
# outvar = f.split('_')[2]
#
# # stochastic seed
# stoch_seed = f.split('_')[6]
#
# fname = f.split('.')[0]
# flist = fname.split('_')
#
# yr = int(flist[-3])
# mnth = int(flist[-2])
# dy = int(flist[-1])
#
# if int(ex_taw) == swhc:
#
# if int(stoch_seed) == seed:
#
# if output_var == outvar:
#
# lst_dates.append(date(yr, mnth, dy))
# lst_files.append(os.path.join(path, f))
#
# seed_dict[output_var] = (lst_dates, lst_files)
#
# print 'testing seed dict \n', seed_dict['infil'][0]
# print 'testing seed dict \n', seed_dict['infil'][1]
#
# print 'populating swhc dict with seed dict for given seed {}'.format(seed)
# swhc_dict['{}'.format(seed)] = seed_dict
#
#
# print 'testing \n', swhc_dict['0']['ro'][0]
#
# yaml_path = os.path.join(output_dir, '{}_files.yml'.format(output_name))
#
# with open(yaml_path, 'w') as wfile:
#
# yaml.dump(swhc_dict, wfile)
#
# return yaml_path
for path, dirs, files in scandir.walk(base_dir, topdown=False):
walk_start = time.time()
# print 'walking {}'.format(time.localtime(walk_start))
if path.endswith('numpy_arrays') and len(files) > 0:
print 'path \n {}'.format(path)
start_path = time.time()
# print 'start searching {}'.format(start_path)
print 'searching {}'.format(time.localtime(start_path))
all_files = []
all_dates = []
for f in files:
fname = f.split('.')[0]
flist = fname.split('_')
yr = int(flist[-3])
mnth = int(flist[-2])
dy = int(flist[-1])
all_files.append(os.path.join(path, f))
all_dates.append(date(yr, mnth, dy))
# print 'the length', len(all_files)
            # Append if the csv already exists, otherwise create it.
            csv_path = os.path.join(output_dir, '{}_appended.csv'.format(output_name))
            mode = 'a' if os.path.isfile(csv_path) else 'w'
            start = time.time()
            # print 'start writing {}'.format(start)
            with open(csv_path, mode) as wfile:
                for f, d in zip(all_files, all_dates):
                    wfile.write('{},{}\n'.format(f, d))
            elapsed = (time.time() - start)
            # print 'write time: {}'.format(elapsed)
elapsed_path = (time.time() - start_path)
# print 'search time: {}'.format(elapsed_path)
# print 'done searching at {}'.format(time.localtime(time.time()))
walk_elapsed = time.time() - walk_start
# print 'walktime {}'.format(walk_elapsed)
print 'done {}'.format(datetime.now())
# print 'file read now making dict'
# swhc_dict = {}
# for taw in taw_list:
# print 'taw {}'.format(taw)
# seed_dict = {}
# for seed in range(runs):
#
# for outvar in output_vars:
#
# lst_dates = []
# lst_files = []
#
# # Tip: Just GET all the files together. Don't search over and over...
# # Do the sorting once all strings and dates are obtained.
#
# with open(os.path.join(output_dir, '{}.csv'.format(output_name)), 'r') as rfile:
# print 'reading'
#
# for line in rfile:
#
# line_lst = line.split(',')
#
# f = line_lst[0]
# d = line_lst[1]
#
# filename = os.path.split(f)[1]
#
# ex_taw = filename.split('_')[4]
#
# # ex) infil, eta, ro etc.
# otvar = filename.split('_')[2]
#
# # stochastic seed
# stoch_seed = filename.split('_')[6]
#
# if int(ex_taw) == taw:
#
# if int(stoch_seed) == seed:
#
# if outvar == otvar:
# lst_files.append(f)
#
# lst_dates.append(d)
#
#
# # for f, d in zip(all_files, all_dates):
# #
# # filename = os.path.split(f)[1]
# #
# # ex_taw = filename.split('_')[4]
# #
# # # ex) infil, eta, ro etc.
# # otvar = filename.split('_')[2]
# #
# # # stochastic seed
# # stoch_seed = filename.split('_')[6]
# #
# # if int(ex_taw) == taw:
# #
# # if int(stoch_seed) == seed:
# #
# # if outvar == otvar:
# #
# # lst_files.append(f)
# #
# # lst_dates.append(d)
#
# seed_dict[outvar] = (lst_dates, lst_files)
#
# swhc_dict['{}'.format(seed)] = seed_dict
#
# yaml_path = os.path.join(output_dir, '{}_files_speedtest.yml'.format(output_name))
#
# with open(yaml_path, 'w') as wfile:
#
# yaml.dump(swhc_dict, wfile)
#
# return yaml_path
if __name__ == '__main__':
# ================================= File Sorter ========================================
stochastic_csv_file = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_III/wjs/wjs_appended.csv'
# starting TAW value
begin_taw = 25
# ending TAW value
end_taw = 925
# grid search step size. Each ETRM run will increase the uniform TAW of the RZSW holding capacity by this many mm.
taw_step = 25
taw_tup = (begin_taw, end_taw, taw_step)
# taw_tup = (25, 50, 25)
start_date = date(2000, 1, 1)
end_date = date(2013, 12, 30)
model_dates = (start_date, end_date)
# list of variables
var_list = ['rzsm', 'ro', 'eta', 'infil']
runs = 6
output = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_III/wjs'
stochastic_filesort(stochastic_csv_file, taw_tup, var_list, model_dates, runs, output)
# ###### ========================== File Finder ===========================================
# print datetime.now()
# output_name = 'wjs'
#
# print 'DOING {}'.format(output_name)
#
# # base_dir = '/Users/dcadol/Desktop/mini_model_rsync/ses'
# # output_dir = '/Users/dcadol/Desktop/mini_model_rsync/ses'
# base_dir = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_III/wjs'
# output_dir = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_III/wjs'
#
# print base_dir
# print output_dir
#
# # starting TAW value
# begin_taw = 25
# # ending TAW value
# end_taw = 925
# # grid search step size. Each ETRM run will increase the uniform TAW of the RZSW holding capacity by this many mm.
# taw_step = 25
#
# taw_tup = (begin_taw, end_taw, taw_step)
#
# # number of the stochastic runs each time. Determines the seed by adding an integer to the original
# # config, giving each taw 10 seeds
# runs = 6
#
# # shape of the array
# arr_shape = (3, 3)
#
# output_vars = ['eta', 'rzsm', 'ro', 'infil']
#
# yaml_path = stochastic_file_finder(output_name, base_dir, output_dir, taw_tup, runs, arr_shape, output_vars)
|
|
#!/usr/bin/env python3
r"""
BMC redfish utility functions.
"""
import json
import re
from robot.libraries.BuiltIn import BuiltIn
import gen_print as gp
MTLS_ENABLED = BuiltIn().get_variable_value("${MTLS_ENABLED}")
class bmc_redfish_utils(object):
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
def __init__(self):
r"""
Initialize the bmc_redfish_utils object.
"""
# Obtain a reference to the global redfish object.
self.__inited__ = False
self._redfish_ = BuiltIn().get_library_instance('redfish')
if MTLS_ENABLED == 'True':
self.__inited__ = True
else:
            # There is a possibility that a given driver supports both redfish and
            # legacy REST.
self._redfish_.login()
self._rest_response_ = \
self._redfish_.get("/xyz/openbmc_project/", valid_status_codes=[200, 404])
# If REST URL /xyz/openbmc_project/ is supported.
if self._rest_response_.status == 200:
self.__inited__ = True
BuiltIn().set_global_variable("${REDFISH_REST_SUPPORTED}", self.__inited__)
def get_redfish_session_info(self):
r"""
Returns redfish sessions info dictionary.
{
'key': 'yLXotJnrh5nDhXj5lLiH' ,
'location': '/redfish/v1/SessionService/Sessions/nblYY4wlz0'
}
"""
session_dict = {
"key": self._redfish_.get_session_key(),
"location": self._redfish_.get_session_location()
}
return session_dict
def get_attribute(self, resource_path, attribute, verify=None):
r"""
Get resource attribute.
Description of argument(s):
resource_path URI resource absolute path (e.g.
"/redfish/v1/Systems/1").
        attribute       Name of the attribute (e.g. 'PowerState').
        verify          Expected attribute value; if supplied and the actual
                        value differs, a ValueError is raised.
        """
resp = self._redfish_.get(resource_path)
if verify:
if resp.dict[attribute] == verify:
return resp.dict[attribute]
else:
raise ValueError("Attribute value is not equal")
elif attribute in resp.dict:
return resp.dict[attribute]
return None
def get_properties(self, resource_path):
r"""
Returns dictionary of attributes for the resource.
Description of argument(s):
resource_path URI resource absolute path (e.g.
/redfish/v1/Systems/1").
"""
resp = self._redfish_.get(resource_path)
return resp.dict
def get_members_uri(self, resource_path, attribute):
r"""
        Returns the list of valid paths which have a given attribute.
Description of argument(s):
resource_path URI resource base path (e.g.
'/redfish/v1/Systems/',
'/redfish/v1/Chassis/').
attribute Name of the attribute (e.g. 'PowerSupplies').
"""
# Set quiet variable to keep subordinate get() calls quiet.
quiet = 1
# Get the member id list.
# e.g. ['/redfish/v1/Chassis/foo', '/redfish/v1/Chassis/bar']
resource_path_list = self.get_member_list(resource_path)
valid_path_list = []
for path_idx in resource_path_list:
            # Get all the child object paths under the member id, e.g.
# ['/redfish/v1/Chassis/foo/Power','/redfish/v1/Chassis/bar/Power']
child_path_list = self.list_request(path_idx)
# Iterate and check if path object has the attribute.
for child_path_idx in child_path_list:
if ('JsonSchemas' in child_path_idx)\
or ('SessionService' in child_path_idx)\
or ('#' in child_path_idx):
continue
if self.get_attribute(child_path_idx, attribute):
valid_path_list.append(child_path_idx)
BuiltIn().log_to_console(valid_path_list)
return valid_path_list
def get_endpoint_path_list(self, resource_path, end_point_prefix):
r"""
        Returns a list of entries ending in "/<end_point_prefix>".
Description of argument(s):
resource_path URI resource base path (e.g. "/redfish/v1/Chassis/").
end_point_prefix Name of the endpoint (e.g. 'Power').
        Find all list entries ending in the "/<end_point_prefix>" combination, such as
/redfish/v1/Chassis/<foo>/Power
/redfish/v1/Chassis/<bar>/Power
"""
end_point_list = self.list_request(resource_path)
        # Regex to match entries ending in "/<end_point_prefix>", optionally followed by an underscore and digits.
regex = ".*/" + end_point_prefix + "[_]?[0-9]*$"
return [x for x in end_point_list if re.match(regex, x, re.IGNORECASE)]
def get_target_actions(self, resource_path, target_attribute):
r"""
Returns resource target entry of the searched target attribute.
Description of argument(s):
resource_path URI resource absolute path
(e.g. "/redfish/v1/Systems/system").
target_attribute Name of the attribute (e.g. 'ComputerSystem.Reset').
Example:
"Actions": {
"#ComputerSystem.Reset": {
"ResetType@Redfish.AllowableValues": [
"On",
"ForceOff",
"GracefulRestart",
"GracefulShutdown"
],
"target": "/redfish/v1/Systems/system/Actions/ComputerSystem.Reset"
}
}
"""
global target_list
target_list = []
resp_dict = self.get_attribute(resource_path, "Actions")
if resp_dict is None:
return None
# Recursively search the "target" key in the nested dictionary.
# Populate the target_list of target entries.
self.get_key_value_nested_dict(resp_dict, "target")
# Return the matching target URL entry.
for target in target_list:
# target "/redfish/v1/Systems/system/Actions/ComputerSystem.Reset"
attribute_in_uri = target.rsplit('/', 1)[-1]
# attribute_in_uri "ComputerSystem.Reset"
if target_attribute == attribute_in_uri:
return target
return None
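    # Usage sketch (illustrative): with the Actions payload shown in the
    # docstring above, get_target_actions("/redfish/v1/Systems/system",
    # "ComputerSystem.Reset") returns
    # "/redfish/v1/Systems/system/Actions/ComputerSystem.Reset".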
def get_member_list(self, resource_path):
r"""
        Perform a GET list request and return available member entries.
Description of argument(s):
resource_path URI resource absolute path
(e.g. "/redfish/v1/SessionService/Sessions").
"Members": [
{
"@odata.id": "/redfish/v1/SessionService/Sessions/Z5HummWPZ7"
}
{
"@odata.id": "/redfish/v1/SessionService/Sessions/46CmQmEL7H"
}
],
"""
member_list = []
resp_list_dict = self.get_attribute(resource_path, "Members")
if resp_list_dict is None:
return member_list
for member_id in range(0, len(resp_list_dict)):
member_list.append(resp_list_dict[member_id]["@odata.id"])
return member_list
def list_request(self, resource_path):
r"""
Perform a GET list request and return available resource paths.
Description of argument(s):
resource_path URI resource absolute path
(e.g. "/redfish/v1/SessionService/Sessions").
"""
gp.qprint_executing(style=gp.func_line_style_short)
# Set quiet variable to keep subordinate get() calls quiet.
quiet = 1
self.__pending_enumeration = set()
self._rest_response_ = \
self._redfish_.get(resource_path,
valid_status_codes=[200, 404, 500])
        # Return the empty set if the resource could not be fetched.
if self._rest_response_.status != 200:
return self.__pending_enumeration
self.walk_nested_dict(self._rest_response_.dict)
if not self.__pending_enumeration:
return resource_path
for resource in self.__pending_enumeration.copy():
self._rest_response_ = \
self._redfish_.get(resource,
valid_status_codes=[200, 404, 500])
if self._rest_response_.status != 200:
continue
self.walk_nested_dict(self._rest_response_.dict)
return list(sorted(self.__pending_enumeration))
def enumerate_request(self, resource_path, return_json=1,
include_dead_resources=False):
r"""
Perform a GET enumerate request and return available resource paths.
Description of argument(s):
resource_path URI resource absolute path (e.g.
"/redfish/v1/SessionService/Sessions").
return_json Indicates whether the result should be
returned as a json string or as a
dictionary.
include_dead_resources Check and return a list of dead/broken URI
resources.
"""
gp.qprint_executing(style=gp.func_line_style_short)
return_json = int(return_json)
# Set quiet variable to keep subordinate get() calls quiet.
quiet = 1
# Variable to hold enumerated data.
self.__result = {}
        # Variable to hold the pending list of resources for which enumeration
        # is yet to be obtained.
self.__pending_enumeration = set()
self.__pending_enumeration.add(resource_path)
# Variable having resources for which enumeration is completed.
enumerated_resources = set()
if include_dead_resources:
dead_resources = {}
resources_to_be_enumerated = (resource_path,)
while resources_to_be_enumerated:
for resource in resources_to_be_enumerated:
# JsonSchemas, SessionService or URLs containing # are not
# required in enumeration.
# Example: '/redfish/v1/JsonSchemas/' and sub resources.
# '/redfish/v1/SessionService'
# '/redfish/v1/Managers/bmc#/Oem'
if ('JsonSchemas' in resource) or ('SessionService' in resource)\
or ('PostCodes' in resource) or ('Registries' in resource)\
or ('Journal' in resource)\
or ('#' in resource):
continue
self._rest_response_ = \
self._redfish_.get(resource, valid_status_codes=[200, 404, 405, 500])
# Enumeration is done for available resources ignoring the
# ones for which response is not obtained.
if self._rest_response_.status != 200:
if include_dead_resources:
try:
dead_resources[self._rest_response_.status].append(
resource)
except KeyError:
dead_resources[self._rest_response_.status] = \
[resource]
continue
self.walk_nested_dict(self._rest_response_.dict, url=resource)
enumerated_resources.update(set(resources_to_be_enumerated))
resources_to_be_enumerated = \
tuple(self.__pending_enumeration - enumerated_resources)
if return_json:
if include_dead_resources:
return json.dumps(self.__result, sort_keys=True,
indent=4, separators=(',', ': ')), dead_resources
else:
return json.dumps(self.__result, sort_keys=True,
indent=4, separators=(',', ': '))
else:
if include_dead_resources:
return self.__result, dead_resources
else:
return self.__result
def walk_nested_dict(self, data, url=''):
r"""
Parse through the nested dictionary and get the resource id paths.
Description of argument(s):
data Nested dictionary data from response message.
url Resource for which the response is obtained in data.
"""
url = url.rstrip('/')
for key, value in data.items():
# Recursion if nested dictionary found.
if isinstance(value, dict):
self.walk_nested_dict(value)
else:
# Value contains a list of dictionaries having member data.
if 'Members' == key:
if isinstance(value, list):
for memberDict in value:
if isinstance(memberDict, str):
self.__pending_enumeration.add(memberDict)
else:
self.__pending_enumeration.add(memberDict['@odata.id'])
if '@odata.id' == key:
value = value.rstrip('/')
# Data for the given url.
if value == url:
self.__result[url] = data
                    # Data still needs to be looked up.
else:
self.__pending_enumeration.add(value)
def get_key_value_nested_dict(self, data, key):
r"""
Parse through the nested dictionary and get the searched key value.
Description of argument(s):
data Nested dictionary data from response message.
key Search dictionary key element.
"""
for k, v in data.items():
if isinstance(v, dict):
self.get_key_value_nested_dict(v, key)
if k == key:
target_list.append(v)
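# A standalone sketch of the same recursive key search (an illustrative
# alternative, not used by the class above) that returns its matches instead
# of appending to the module-global target_list:
def _find_key_values_sketch(data, key, found=None):
    r"""Collect every value stored under 'key' anywhere in a nested dict."""
    if found is None:
        found = []
    for k, v in data.items():
        if isinstance(v, dict):
            _find_key_values_sketch(v, key, found)
        if k == key:
            found.append(v)
    return found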
|
|
# Generated from java-escape by ANTLR 4.5
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
package = globals().get("__package__", None)
ischild = len(package)>0 if package is not None else False
if ischild:
from .QueryListener import QueryListener
from .QueryVisitor import QueryVisitor
else:
from QueryListener import QueryListener
from QueryVisitor import QueryVisitor
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3")
buf.write(u"\27\u00da\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write(u"\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t")
buf.write(u"\r\4\16\t\16\3\2\5\2\36\n\2\3\2\3\2\5\2\"\n\2\3\2\3\2")
buf.write(u"\5\2&\n\2\7\2(\n\2\f\2\16\2+\13\2\3\3\3\3\3\3\3\3\3\3")
buf.write(u"\3\3\3\3\5\3\64\n\3\5\3\66\n\3\3\4\3\4\3\4\3\4\7\4<\n")
buf.write(u"\4\f\4\16\4?\13\4\3\5\3\5\3\5\5\5D\n\5\3\6\3\6\3\6\3")
buf.write(u"\6\5\6J\n\6\3\6\3\6\3\6\3\6\6\6P\n\6\r\6\16\6Q\5\6T\n")
buf.write(u"\6\3\7\3\7\6\7X\n\7\r\7\16\7Y\3\7\3\7\3\7\7\7_\n\7\f")
buf.write(u"\7\16\7b\13\7\3\7\3\7\3\7\7\7g\n\7\f\7\16\7j\13\7\5\7")
buf.write(u"l\n\7\3\b\3\b\3\b\5\bq\n\b\3\t\3\t\3\t\3\t\3\t\5\tx\n")
buf.write(u"\t\3\t\3\t\5\t|\n\t\3\t\3\t\5\t\u0080\n\t\3\t\7\t\u0083")
buf.write(u"\n\t\f\t\16\t\u0086\13\t\3\t\5\t\u0089\n\t\3\t\5\t\u008c")
buf.write(u"\n\t\3\t\5\t\u008f\n\t\3\t\3\t\5\t\u0093\n\t\3\n\3\n")
buf.write(u"\3\13\3\13\3\13\3\f\3\f\3\f\5\f\u009d\n\f\3\f\3\f\5\f")
buf.write(u"\u00a1\n\f\3\f\3\f\3\f\3\f\5\f\u00a7\n\f\3\f\3\f\5\f")
buf.write(u"\u00ab\n\f\3\f\3\f\5\f\u00af\n\f\3\f\3\f\5\f\u00b3\n")
buf.write(u"\f\3\f\3\f\3\f\3\f\5\f\u00b9\n\f\3\f\3\f\5\f\u00bd\n")
buf.write(u"\f\3\f\3\f\5\f\u00c1\n\f\3\f\3\f\5\f\u00c5\n\f\3\r\3")
buf.write(u"\r\3\16\6\16\u00ca\n\16\r\16\16\16\u00cb\3\16\6\16\u00cf")
buf.write(u"\n\16\r\16\16\16\u00d0\3\16\6\16\u00d4\n\16\r\16\16\16")
buf.write(u"\u00d5\5\16\u00d8\n\16\3\16\2\2\17\2\4\6\b\n\f\16\20")
buf.write(u"\22\24\26\30\32\2\3\3\3\3\3\u00f9\2)\3\2\2\2\4\65\3\2")
buf.write(u"\2\2\6\67\3\2\2\2\bC\3\2\2\2\nS\3\2\2\2\fk\3\2\2\2\16")
buf.write(u"p\3\2\2\2\20\u0092\3\2\2\2\22\u0094\3\2\2\2\24\u0096")
buf.write(u"\3\2\2\2\26\u00c4\3\2\2\2\30\u00c6\3\2\2\2\32\u00d7\3")
buf.write(u"\2\2\2\34\36\5\32\16\2\35\34\3\2\2\2\35\36\3\2\2\2\36")
buf.write(u"\37\3\2\2\2\37!\5\4\3\2 \"\5\32\16\2! \3\2\2\2!\"\3\2")
buf.write(u"\2\2\"#\3\2\2\2#%\t\2\2\2$&\5\32\16\2%$\3\2\2\2%&\3\2")
buf.write(u"\2\2&(\3\2\2\2\'\35\3\2\2\2(+\3\2\2\2)\'\3\2\2\2)*\3")
buf.write(u"\2\2\2*\3\3\2\2\2+)\3\2\2\2,\66\5\6\4\2-.\7\4\2\2./\5")
buf.write(u"\32\16\2/\60\5\22\n\2\60\61\5\32\16\2\61\63\5\6\4\2\62")
buf.write(u"\64\5\32\16\2\63\62\3\2\2\2\63\64\3\2\2\2\64\66\3\2\2")
buf.write(u"\2\65,\3\2\2\2\65-\3\2\2\2\66\5\3\2\2\2\67=\5\b\5\28")
buf.write(u"9\5\32\16\29:\5\b\5\2:<\3\2\2\2;8\3\2\2\2<?\3\2\2\2=")
buf.write(u";\3\2\2\2=>\3\2\2\2>\7\3\2\2\2?=\3\2\2\2@A\7\5\2\2AD")
buf.write(u"\5\n\6\2BD\5\n\6\2C@\3\2\2\2CB\3\2\2\2D\t\3\2\2\2EF\7")
buf.write(u"\6\2\2FG\5\6\4\2GI\7\7\2\2HJ\5\26\f\2IH\3\2\2\2IJ\3\2")
buf.write(u"\2\2JT\3\2\2\2KT\5\f\7\2LO\5\f\7\2MN\7\b\2\2NP\5\f\7")
buf.write(u"\2OM\3\2\2\2PQ\3\2\2\2QO\3\2\2\2QR\3\2\2\2RT\3\2\2\2")
buf.write(u"SE\3\2\2\2SK\3\2\2\2SL\3\2\2\2T\13\3\2\2\2UV\7\t\2\2")
buf.write(u"VX\5\16\b\2WU\3\2\2\2XY\3\2\2\2YW\3\2\2\2YZ\3\2\2\2Z")
buf.write(u"l\3\2\2\2[`\5\16\b\2\\]\7\t\2\2]_\5\16\b\2^\\\3\2\2\2")
buf.write(u"_b\3\2\2\2`^\3\2\2\2`a\3\2\2\2al\3\2\2\2b`\3\2\2\2ch")
buf.write(u"\5\24\13\2de\7\t\2\2eg\5\16\b\2fd\3\2\2\2gj\3\2\2\2h")
buf.write(u"f\3\2\2\2hi\3\2\2\2il\3\2\2\2jh\3\2\2\2kW\3\2\2\2k[\3")
buf.write(u"\2\2\2kc\3\2\2\2l\r\3\2\2\2mn\7\5\2\2nq\5\20\t\2oq\5")
buf.write(u"\20\t\2pm\3\2\2\2po\3\2\2\2q\17\3\2\2\2r\u0093\7\21\2")
buf.write(u"\2s\u0093\7\23\2\2t\u0093\7\22\2\2uw\7\n\2\2vx\5\32\16")
buf.write(u"\2wv\3\2\2\2wx\3\2\2\2xy\3\2\2\2y{\5\20\t\2z|\5\32\16")
buf.write(u"\2{z\3\2\2\2{|\3\2\2\2|\u0084\3\2\2\2}\177\7\13\2\2~")
buf.write(u"\u0080\5\32\16\2\177~\3\2\2\2\177\u0080\3\2\2\2\u0080")
buf.write(u"\u0081\3\2\2\2\u0081\u0083\5\20\t\2\u0082}\3\2\2\2\u0083")
buf.write(u"\u0086\3\2\2\2\u0084\u0082\3\2\2\2\u0084\u0085\3\2\2")
buf.write(u"\2\u0085\u0088\3\2\2\2\u0086\u0084\3\2\2\2\u0087\u0089")
buf.write(u"\5\32\16\2\u0088\u0087\3\2\2\2\u0088\u0089\3\2\2\2\u0089")
buf.write(u"\u008b\3\2\2\2\u008a\u008c\7\13\2\2\u008b\u008a\3\2\2")
buf.write(u"\2\u008b\u008c\3\2\2\2\u008c\u008e\3\2\2\2\u008d\u008f")
buf.write(u"\5\32\16\2\u008e\u008d\3\2\2\2\u008e\u008f\3\2\2\2\u008f")
buf.write(u"\u0090\3\2\2\2\u0090\u0091\7\f\2\2\u0091\u0093\3\2\2")
buf.write(u"\2\u0092r\3\2\2\2\u0092s\3\2\2\2\u0092t\3\2\2\2\u0092")
buf.write(u"u\3\2\2\2\u0093\21\3\2\2\2\u0094\u0095\7\23\2\2\u0095")
buf.write(u"\23\3\2\2\2\u0096\u0097\7\r\2\2\u0097\u0098\5\30\r\2")
buf.write(u"\u0098\25\3\2\2\2\u0099\u00c5\7\16\2\2\u009a\u009c\7")
buf.write(u"\17\2\2\u009b\u009d\5\32\16\2\u009c\u009b\3\2\2\2\u009c")
buf.write(u"\u009d\3\2\2\2\u009d\u009e\3\2\2\2\u009e\u00a0\5\30\r")
buf.write(u"\2\u009f\u00a1\5\32\16\2\u00a0\u009f\3\2\2\2\u00a0\u00a1")
buf.write(u"\3\2\2\2\u00a1\u00a2\3\2\2\2\u00a2\u00a3\7\20\2\2\u00a3")
buf.write(u"\u00c5\3\2\2\2\u00a4\u00a6\7\17\2\2\u00a5\u00a7\5\32")
buf.write(u"\16\2\u00a6\u00a5\3\2\2\2\u00a6\u00a7\3\2\2\2\u00a7\u00a8")
buf.write(u"\3\2\2\2\u00a8\u00aa\5\30\r\2\u00a9\u00ab\5\32\16\2\u00aa")
buf.write(u"\u00a9\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\u00ac\3\2\2")
buf.write(u"\2\u00ac\u00ae\7\13\2\2\u00ad\u00af\5\32\16\2\u00ae\u00ad")
buf.write(u"\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00b0\3\2\2\2\u00b0")
buf.write(u"\u00b2\5\30\r\2\u00b1\u00b3\5\32\16\2\u00b2\u00b1\3\2")
buf.write(u"\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b5")
buf.write(u"\7\20\2\2\u00b5\u00c5\3\2\2\2\u00b6\u00b8\7\17\2\2\u00b7")
buf.write(u"\u00b9\5\32\16\2\u00b8\u00b7\3\2\2\2\u00b8\u00b9\3\2")
buf.write(u"\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00bc\7\13\2\2\u00bb\u00bd")
buf.write(u"\5\32\16\2\u00bc\u00bb\3\2\2\2\u00bc\u00bd\3\2\2\2\u00bd")
buf.write(u"\u00be\3\2\2\2\u00be\u00c0\5\30\r\2\u00bf\u00c1\5\32")
buf.write(u"\16\2\u00c0\u00bf\3\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00c2")
buf.write(u"\3\2\2\2\u00c2\u00c3\7\20\2\2\u00c3\u00c5\3\2\2\2\u00c4")
buf.write(u"\u0099\3\2\2\2\u00c4\u009a\3\2\2\2\u00c4\u00a4\3\2\2")
buf.write(u"\2\u00c4\u00b6\3\2\2\2\u00c5\27\3\2\2\2\u00c6\u00c7\7")
buf.write(u"\22\2\2\u00c7\31\3\2\2\2\u00c8\u00ca\7\24\2\2\u00c9\u00c8")
buf.write(u"\3\2\2\2\u00ca\u00cb\3\2\2\2\u00cb\u00c9\3\2\2\2\u00cb")
buf.write(u"\u00cc\3\2\2\2\u00cc\u00d8\3\2\2\2\u00cd\u00cf\7\26\2")
buf.write(u"\2\u00ce\u00cd\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0\u00ce")
buf.write(u"\3\2\2\2\u00d0\u00d1\3\2\2\2\u00d1\u00d8\3\2\2\2\u00d2")
buf.write(u"\u00d4\7\27\2\2\u00d3\u00d2\3\2\2\2\u00d4\u00d5\3\2\2")
buf.write(u"\2\u00d5\u00d3\3\2\2\2\u00d5\u00d6\3\2\2\2\u00d6\u00d8")
buf.write(u"\3\2\2\2\u00d7\u00c9\3\2\2\2\u00d7\u00ce\3\2\2\2\u00d7")
buf.write(u"\u00d3\3\2\2\2\u00d8\33\3\2\2\2(\35!%)\63\65=CIQSY`h")
buf.write(u"kpw{\177\u0084\u0088\u008b\u008e\u0092\u009c\u00a0\u00a6")
buf.write(u"\u00aa\u00ae\u00b2\u00b8\u00bc\u00c0\u00c4\u00cb\u00d0")
buf.write(u"\u00d5\u00d7")
return buf.getvalue()
class QueryParser ( Parser ):
grammarFileName = "java-escape"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ u"<INVALID>", u"';'", u"'define'", u"'~'", u"'('",
u"')'", u"'::'", u"':'", u"'['", u"','", u"']'", u"'\\'",
u"'?'", u"'{'", u"'}'" ]
symbolicNames = [ u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"ExplicitRegex",
u"Digits", u"Symbol", u"WhiteSpace", u"NewLine", u"BlockComment",
u"LineComment" ]
RULE_queryDocument = 0
RULE_documentItem = 1
RULE_query = 2
RULE_termNegatable = 3
RULE_term = 4
RULE_matchFunction = 5
RULE_functionalNegatable = 6
RULE_functional = 7
RULE_identifier = 8
RULE_backReference = 9
RULE_quantifier = 10
RULE_number = 11
RULE_ws = 12
ruleNames = [ u"queryDocument", u"documentItem", u"query", u"termNegatable",
u"term", u"matchFunction", u"functionalNegatable", u"functional",
u"identifier", u"backReference", u"quantifier", u"number",
u"ws" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
ExplicitRegex=15
Digits=16
Symbol=17
WhiteSpace=18
NewLine=19
BlockComment=20
LineComment=21
def __init__(self, input):
super(QueryParser, self).__init__(input)
self.checkVersion("4.5")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class QueryDocumentContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.QueryDocumentContext, self).__init__(parent, invokingState)
self.parser = parser
def documentItem(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.DocumentItemContext)
else:
return self.getTypedRuleContext(QueryParser.DocumentItemContext,i)
def EOF(self, i=None):
if i is None:
return self.getTokens(QueryParser.EOF)
else:
return self.getToken(QueryParser.EOF, i)
def ws(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.WsContext)
else:
return self.getTypedRuleContext(QueryParser.WsContext,i)
def getRuleIndex(self):
return QueryParser.RULE_queryDocument
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterQueryDocument(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitQueryDocument(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitQueryDocument(self)
else:
return visitor.visitChildren(self)
def queryDocument(self):
localctx = QueryParser.QueryDocumentContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_queryDocument)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 39
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.T__1) | (1 << QueryParser.T__2) | (1 << QueryParser.T__3) | (1 << QueryParser.T__6) | (1 << QueryParser.T__7) | (1 << QueryParser.T__10) | (1 << QueryParser.ExplicitRegex) | (1 << QueryParser.Digits) | (1 << QueryParser.Symbol) | (1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 27
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 26
self.ws()
self.state = 29
self.documentItem()
self.state = 31
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 30
self.ws()
self.state = 33
_la = self._input.LA(1)
if not(_la==QueryParser.EOF or _la==QueryParser.T__0):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 35
la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
if la_ == 1:
self.state = 34
self.ws()
self.state = 41
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DocumentItemContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.DocumentItemContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QueryParser.RULE_documentItem
def copyFrom(self, ctx):
super(QueryParser.DocumentItemContext, self).copyFrom(ctx)
class DefinitionContext(DocumentItemContext):
def __init__(self, parser, ctx): # actually a QueryParser.DocumentItemContext)
super(QueryParser.DefinitionContext, self).__init__(parser)
self.copyFrom(ctx)
def ws(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.WsContext)
else:
return self.getTypedRuleContext(QueryParser.WsContext,i)
def identifier(self):
return self.getTypedRuleContext(QueryParser.IdentifierContext,0)
def query(self):
return self.getTypedRuleContext(QueryParser.QueryContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterDefinition(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitDefinition(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitDefinition(self)
else:
return visitor.visitChildren(self)
class QueryStatementContext(DocumentItemContext):
def __init__(self, parser, ctx): # actually a QueryParser.DocumentItemContext)
super(QueryParser.QueryStatementContext, self).__init__(parser)
self.copyFrom(ctx)
def query(self):
return self.getTypedRuleContext(QueryParser.QueryContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterQueryStatement(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitQueryStatement(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitQueryStatement(self)
else:
return visitor.visitChildren(self)
def documentItem(self):
localctx = QueryParser.DocumentItemContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_documentItem)
try:
self.state = 51
token = self._input.LA(1)
if token in [QueryParser.T__2, QueryParser.T__3, QueryParser.T__6, QueryParser.T__7, QueryParser.T__10, QueryParser.ExplicitRegex, QueryParser.Digits, QueryParser.Symbol]:
localctx = QueryParser.QueryStatementContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 42
self.query()
elif token in [QueryParser.T__1]:
localctx = QueryParser.DefinitionContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 43
self.match(QueryParser.T__1)
self.state = 44
self.ws()
self.state = 45
self.identifier()
self.state = 46
self.ws()
self.state = 47
self.query()
self.state = 49
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
self.state = 48
self.ws()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class QueryContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.QueryContext, self).__init__(parent, invokingState)
self.parser = parser
def termNegatable(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.TermNegatableContext)
else:
return self.getTypedRuleContext(QueryParser.TermNegatableContext,i)
def ws(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.WsContext)
else:
return self.getTypedRuleContext(QueryParser.WsContext,i)
def getRuleIndex(self):
return QueryParser.RULE_query
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterQuery(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitQuery(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitQuery(self)
else:
return visitor.visitChildren(self)
def query(self):
localctx = QueryParser.QueryContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_query)
try:
self.enterOuterAlt(localctx, 1)
self.state = 53
self.termNegatable()
self.state = 59
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,6,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 54
self.ws()
self.state = 55
self.termNegatable()
self.state = 61
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,6,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TermNegatableContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.TermNegatableContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QueryParser.RULE_termNegatable
def copyFrom(self, ctx):
super(QueryParser.TermNegatableContext, self).copyFrom(ctx)
class PositiveTermContext(TermNegatableContext):
def __init__(self, parser, ctx): # actually a QueryParser.TermNegatableContext
super(QueryParser.PositiveTermContext, self).__init__(parser)
self.copyFrom(ctx)
def term(self):
return self.getTypedRuleContext(QueryParser.TermContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterPositiveTerm(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitPositiveTerm(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitPositiveTerm(self)
else:
return visitor.visitChildren(self)
class NegativeTermContext(TermNegatableContext):
def __init__(self, parser, ctx): # actually a QueryParser.TermNegatableContext
super(QueryParser.NegativeTermContext, self).__init__(parser)
self.copyFrom(ctx)
def term(self):
return self.getTypedRuleContext(QueryParser.TermContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterNegativeTerm(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitNegativeTerm(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitNegativeTerm(self)
else:
return visitor.visitChildren(self)
def termNegatable(self):
localctx = QueryParser.TermNegatableContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_termNegatable)
try:
self.state = 65
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
localctx = QueryParser.NegativeTermContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 62
self.match(QueryParser.T__2)
self.state = 63
self.term()
pass
elif la_ == 2:
localctx = QueryParser.PositiveTermContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 64
self.term()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TermContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.TermContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QueryParser.RULE_term
def copyFrom(self, ctx):
super(QueryParser.TermContext, self).copyFrom(ctx)
class SimpleTermContext(TermContext):
def __init__(self, parser, ctx): # actually a QueryParser.TermContext
super(QueryParser.SimpleTermContext, self).__init__(parser)
self.copyFrom(ctx)
def matchFunction(self):
return self.getTypedRuleContext(QueryParser.MatchFunctionContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterSimpleTerm(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitSimpleTerm(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitSimpleTerm(self)
else:
return visitor.visitChildren(self)
class IntersectionTermContext(TermContext):
def __init__(self, parser, ctx): # actually a QueryParser.TermContext
super(QueryParser.IntersectionTermContext, self).__init__(parser)
self.copyFrom(ctx)
def matchFunction(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.MatchFunctionContext)
else:
return self.getTypedRuleContext(QueryParser.MatchFunctionContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterIntersectionTerm(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitIntersectionTerm(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitIntersectionTerm(self)
else:
return visitor.visitChildren(self)
class SubQueryTermContext(TermContext):
def __init__(self, parser, ctx): # actually a QueryParser.TermContext
super(QueryParser.SubQueryTermContext, self).__init__(parser)
self.copyFrom(ctx)
def query(self):
return self.getTypedRuleContext(QueryParser.QueryContext,0)
def quantifier(self):
return self.getTypedRuleContext(QueryParser.QuantifierContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterSubQueryTerm(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitSubQueryTerm(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitSubQueryTerm(self)
else:
return visitor.visitChildren(self)
def term(self):
localctx = QueryParser.TermContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_term)
self._la = 0 # Token type
try:
self.state = 81
la_ = self._interp.adaptivePredict(self._input,10,self._ctx)
if la_ == 1:
localctx = QueryParser.SubQueryTermContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 67
self.match(QueryParser.T__3)
self.state = 68
self.query()
self.state = 69
self.match(QueryParser.T__4)
self.state = 71
_la = self._input.LA(1)
if _la==QueryParser.T__11 or _la==QueryParser.T__12:
self.state = 70
self.quantifier()
pass
elif la_ == 2:
localctx = QueryParser.SimpleTermContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 73
self.matchFunction()
pass
elif la_ == 3:
localctx = QueryParser.IntersectionTermContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 74
self.matchFunction()
self.state = 77
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 75
self.match(QueryParser.T__5)
self.state = 76
self.matchFunction()
self.state = 79
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==QueryParser.T__5):
break
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class MatchFunctionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.MatchFunctionContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QueryParser.RULE_matchFunction
def copyFrom(self, ctx):
super(QueryParser.MatchFunctionContext, self).copyFrom(ctx)
class DirectApplyContext(MatchFunctionContext):
def __init__(self, parser, ctx): # actually a QueryParser.MatchFunctionContext
super(QueryParser.DirectApplyContext, self).__init__(parser)
self.copyFrom(ctx)
def functionalNegatable(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.FunctionalNegatableContext)
else:
return self.getTypedRuleContext(QueryParser.FunctionalNegatableContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterDirectApply(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitDirectApply(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitDirectApply(self)
else:
return visitor.visitChildren(self)
class BackReferenceApplyContext(MatchFunctionContext):
def __init__(self, parser, ctx): # actually a QueryParser.MatchFunctionContext
super(QueryParser.BackReferenceApplyContext, self).__init__(parser)
self.copyFrom(ctx)
def backReference(self):
return self.getTypedRuleContext(QueryParser.BackReferenceContext,0)
def functionalNegatable(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.FunctionalNegatableContext)
else:
return self.getTypedRuleContext(QueryParser.FunctionalNegatableContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterBackReferenceApply(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitBackReferenceApply(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitBackReferenceApply(self)
else:
return visitor.visitChildren(self)
class MatchApplyContext(MatchFunctionContext):
def __init__(self, parser, ctx): # actually a QueryParser.MatchFunctionContext
super(QueryParser.MatchApplyContext, self).__init__(parser)
self.copyFrom(ctx)
def functionalNegatable(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.FunctionalNegatableContext)
else:
return self.getTypedRuleContext(QueryParser.FunctionalNegatableContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterMatchApply(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitMatchApply(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitMatchApply(self)
else:
return visitor.visitChildren(self)
def matchFunction(self):
localctx = QueryParser.MatchFunctionContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_matchFunction)
self._la = 0 # Token type
try:
self.state = 105
token = self._input.LA(1)
if token in [QueryParser.T__6]:
localctx = QueryParser.DirectApplyContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 85
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 83
self.match(QueryParser.T__6)
self.state = 84
self.functionalNegatable()
self.state = 87
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==QueryParser.T__6):
break
elif token in [QueryParser.T__2, QueryParser.T__7, QueryParser.ExplicitRegex, QueryParser.Digits, QueryParser.Symbol]:
localctx = QueryParser.MatchApplyContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 89
self.functionalNegatable()
self.state = 94
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QueryParser.T__6:
self.state = 90
self.match(QueryParser.T__6)
self.state = 91
self.functionalNegatable()
self.state = 96
self._errHandler.sync(self)
_la = self._input.LA(1)
elif token in [QueryParser.T__10]:
localctx = QueryParser.BackReferenceApplyContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 97
self.backReference()
self.state = 102
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QueryParser.T__6:
self.state = 98
self.match(QueryParser.T__6)
self.state = 99
self.functionalNegatable()
self.state = 104
self._errHandler.sync(self)
_la = self._input.LA(1)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionalNegatableContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.FunctionalNegatableContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QueryParser.RULE_functionalNegatable
def copyFrom(self, ctx):
super(QueryParser.FunctionalNegatableContext, self).copyFrom(ctx)
class NegativeFunctionalContext(FunctionalNegatableContext):
def __init__(self, parser, ctx): # actually a QueryParser.FunctionalNegatableContext
super(QueryParser.NegativeFunctionalContext, self).__init__(parser)
self.copyFrom(ctx)
def functional(self):
return self.getTypedRuleContext(QueryParser.FunctionalContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterNegativeFunctional(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitNegativeFunctional(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitNegativeFunctional(self)
else:
return visitor.visitChildren(self)
class PositiveFunctionalContext(FunctionalNegatableContext):
def __init__(self, parser, ctx): # actually a QueryParser.FunctionalNegatableContext
super(QueryParser.PositiveFunctionalContext, self).__init__(parser)
self.copyFrom(ctx)
def functional(self):
return self.getTypedRuleContext(QueryParser.FunctionalContext,0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterPositiveFunctional(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitPositiveFunctional(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitPositiveFunctional(self)
else:
return visitor.visitChildren(self)
def functionalNegatable(self):
localctx = QueryParser.FunctionalNegatableContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_functionalNegatable)
try:
self.state = 110
token = self._input.LA(1)
if token in [QueryParser.T__2]:
localctx = QueryParser.NegativeFunctionalContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 107
self.match(QueryParser.T__2)
self.state = 108
self.functional()
elif token in [QueryParser.T__7, QueryParser.ExplicitRegex, QueryParser.Digits, QueryParser.Symbol]:
localctx = QueryParser.PositiveFunctionalContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 109
self.functional()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FunctionalContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.FunctionalContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QueryParser.RULE_functional
def copyFrom(self, ctx):
super(QueryParser.FunctionalContext, self).copyFrom(ctx)
class ListFunctionalContext(FunctionalContext):
def __init__(self, parser, ctx): # actually a QueryParser.FunctionalContext
super(QueryParser.ListFunctionalContext, self).__init__(parser)
self.copyFrom(ctx)
def functional(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.FunctionalContext)
else:
return self.getTypedRuleContext(QueryParser.FunctionalContext,i)
def ws(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.WsContext)
else:
return self.getTypedRuleContext(QueryParser.WsContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterListFunctional(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitListFunctional(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitListFunctional(self)
else:
return visitor.visitChildren(self)
class ImplicitFunctionalContext(FunctionalContext):
def __init__(self, parser, ctx): # actually a QueryParser.FunctionalContext
super(QueryParser.ImplicitFunctionalContext, self).__init__(parser)
self.copyFrom(ctx)
def Symbol(self):
return self.getToken(QueryParser.Symbol, 0)
def Digits(self):
return self.getToken(QueryParser.Digits, 0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterImplicitFunctional(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitImplicitFunctional(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitImplicitFunctional(self)
else:
return visitor.visitChildren(self)
class ExplicitFunctionalContext(FunctionalContext):
def __init__(self, parser, ctx): # actually a QueryParser.FunctionalContext
super(QueryParser.ExplicitFunctionalContext, self).__init__(parser)
self.copyFrom(ctx)
def ExplicitRegex(self):
return self.getToken(QueryParser.ExplicitRegex, 0)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterExplicitFunctional(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitExplicitFunctional(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitExplicitFunctional(self)
else:
return visitor.visitChildren(self)
def functional(self):
localctx = QueryParser.FunctionalContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_functional)
self._la = 0 # Token type
try:
self.state = 144
token = self._input.LA(1)
if token in [QueryParser.ExplicitRegex]:
localctx = QueryParser.ExplicitFunctionalContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 112
self.match(QueryParser.ExplicitRegex)
elif token in [QueryParser.Symbol]:
localctx = QueryParser.ImplicitFunctionalContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 113
self.match(QueryParser.Symbol)
elif token in [QueryParser.Digits]:
localctx = QueryParser.ImplicitFunctionalContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 114
self.match(QueryParser.Digits)
elif token in [QueryParser.T__7]:
localctx = QueryParser.ListFunctionalContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 115
self.match(QueryParser.T__7)
self.state = 117
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 116
self.ws()
self.state = 119
self.functional()
self.state = 121
la_ = self._interp.adaptivePredict(self._input,17,self._ctx)
if la_ == 1:
self.state = 120
self.ws()
self.state = 130
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 123
self.match(QueryParser.T__8)
self.state = 125
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 124
self.ws()
self.state = 127
self.functional()
self.state = 132
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,19,self._ctx)
self.state = 134
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
self.state = 133
self.ws()
self.state = 137
_la = self._input.LA(1)
if _la==QueryParser.T__8:
self.state = 136
self.match(QueryParser.T__8)
self.state = 140
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 139
self.ws()
self.state = 142
self.match(QueryParser.T__9)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdentifierContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.IdentifierContext, self).__init__(parent, invokingState)
self.parser = parser
def Symbol(self):
return self.getToken(QueryParser.Symbol, 0)
def getRuleIndex(self):
return QueryParser.RULE_identifier
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterIdentifier(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitIdentifier(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitIdentifier(self)
else:
return visitor.visitChildren(self)
def identifier(self):
localctx = QueryParser.IdentifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_identifier)
try:
self.enterOuterAlt(localctx, 1)
self.state = 146
self.match(QueryParser.Symbol)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BackReferenceContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.BackReferenceContext, self).__init__(parent, invokingState)
self.parser = parser
def number(self):
return self.getTypedRuleContext(QueryParser.NumberContext,0)
def getRuleIndex(self):
return QueryParser.RULE_backReference
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterBackReference(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitBackReference(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitBackReference(self)
else:
return visitor.visitChildren(self)
def backReference(self):
localctx = QueryParser.BackReferenceContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_backReference)
try:
self.enterOuterAlt(localctx, 1)
self.state = 148
self.match(QueryParser.T__10)
self.state = 149
self.number()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class QuantifierContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.QuantifierContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QueryParser.RULE_quantifier
def copyFrom(self, ctx):
super(QueryParser.QuantifierContext, self).copyFrom(ctx)
class Quantifier0toNContext(QuantifierContext):
def __init__(self, parser, ctx): # actually a QueryParser.QuantifierContext
super(QueryParser.Quantifier0toNContext, self).__init__(parser)
self.copyFrom(ctx)
def number(self):
return self.getTypedRuleContext(QueryParser.NumberContext,0)
def ws(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.WsContext)
else:
return self.getTypedRuleContext(QueryParser.WsContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterQuantifier0toN(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitQuantifier0toN(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitQuantifier0toN(self)
else:
return visitor.visitChildren(self)
class QuantifierNContext(QuantifierContext):
def __init__(self, parser, ctx): # actually a QueryParser.QuantifierContext
super(QueryParser.QuantifierNContext, self).__init__(parser)
self.copyFrom(ctx)
def number(self):
return self.getTypedRuleContext(QueryParser.NumberContext,0)
def ws(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.WsContext)
else:
return self.getTypedRuleContext(QueryParser.WsContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterQuantifierN(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitQuantifierN(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitQuantifierN(self)
else:
return visitor.visitChildren(self)
class QuantifierZeroOrOneContext(QuantifierContext):
def __init__(self, parser, ctx): # actually a QueryParser.QuantifierContext
super(QueryParser.QuantifierZeroOrOneContext, self).__init__(parser)
self.copyFrom(ctx)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterQuantifierZeroOrOne(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitQuantifierZeroOrOne(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitQuantifierZeroOrOne(self)
else:
return visitor.visitChildren(self)
class QuantifierNtoMContext(QuantifierContext):
def __init__(self, parser, ctx): # actually a QueryParser.QuantifierContext
super(QueryParser.QuantifierNtoMContext, self).__init__(parser)
self.copyFrom(ctx)
def number(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.NumberContext)
else:
return self.getTypedRuleContext(QueryParser.NumberContext,i)
def ws(self, i=None):
if i is None:
return self.getTypedRuleContexts(QueryParser.WsContext)
else:
return self.getTypedRuleContext(QueryParser.WsContext,i)
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterQuantifierNtoM(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitQuantifierNtoM(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitQuantifierNtoM(self)
else:
return visitor.visitChildren(self)
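# quantifier has four alternatives, matching the context classes above:
# zero-or-one, exactly N, N-to-M, and zero-to-N (open lower bound).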
def quantifier(self):
localctx = QueryParser.QuantifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_quantifier)
self._la = 0 # Token type
try:
self.state = 194
la_ = self._interp.adaptivePredict(self._input,33,self._ctx)
if la_ == 1:
localctx = QueryParser.QuantifierZeroOrOneContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 151
self.match(QueryParser.T__11)
pass
elif la_ == 2:
localctx = QueryParser.QuantifierNContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 152
self.match(QueryParser.T__12)
self.state = 154
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 153
self.ws()
self.state = 156
self.number()
self.state = 158
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 157
self.ws()
self.state = 160
self.match(QueryParser.T__13)
pass
elif la_ == 3:
localctx = QueryParser.QuantifierNtoMContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 162
self.match(QueryParser.T__12)
self.state = 164
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 163
self.ws()
self.state = 166
self.number()
self.state = 168
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 167
self.ws()
self.state = 170
self.match(QueryParser.T__8)
self.state = 172
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 171
self.ws()
self.state = 174
self.number()
self.state = 176
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 175
self.ws()
self.state = 178
self.match(QueryParser.T__13)
pass
elif la_ == 4:
localctx = QueryParser.Quantifier0toNContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 180
self.match(QueryParser.T__12)
self.state = 182
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 181
self.ws()
self.state = 184
self.match(QueryParser.T__8)
self.state = 186
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 185
self.ws()
self.state = 188
self.number()
self.state = 190
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QueryParser.WhiteSpace) | (1 << QueryParser.BlockComment) | (1 << QueryParser.LineComment))) != 0):
self.state = 189
self.ws()
self.state = 192
self.match(QueryParser.T__13)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NumberContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.NumberContext, self).__init__(parent, invokingState)
self.parser = parser
def Digits(self):
return self.getToken(QueryParser.Digits, 0)
def getRuleIndex(self):
return QueryParser.RULE_number
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterNumber(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitNumber(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitNumber(self)
else:
return visitor.visitChildren(self)
def number(self):
localctx = QueryParser.NumberContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_number)
try:
self.enterOuterAlt(localctx, 1)
self.state = 196
self.match(QueryParser.Digits)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class WsContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(QueryParser.WsContext, self).__init__(parent, invokingState)
self.parser = parser
def WhiteSpace(self, i=None):
if i is None:
return self.getTokens(QueryParser.WhiteSpace)
else:
return self.getToken(QueryParser.WhiteSpace, i)
def BlockComment(self, i=None):
if i is None:
return self.getTokens(QueryParser.BlockComment)
else:
return self.getToken(QueryParser.BlockComment, i)
def LineComment(self, i=None):
if i is None:
return self.getTokens(QueryParser.LineComment)
else:
return self.getToken(QueryParser.LineComment, i)
def getRuleIndex(self):
return QueryParser.RULE_ws
def enterRule(self, listener):
if isinstance( listener, QueryListener ):
listener.enterWs(self)
def exitRule(self, listener):
if isinstance( listener, QueryListener ):
listener.exitWs(self)
def accept(self, visitor):
if isinstance( visitor, QueryVisitor ):
return visitor.visitWs(self)
else:
return visitor.visitChildren(self)
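# ws matches a homogeneous run of one or more WhiteSpace, BlockComment, or LineComment tokens.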
def ws(self):
localctx = QueryParser.WsContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_ws)
try:
self.state = 213
token = self._input.LA(1)
if token in [QueryParser.WhiteSpace]:
self.enterOuterAlt(localctx, 1)
self.state = 199
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 198
self.match(QueryParser.WhiteSpace)
else:
raise NoViableAltException(self)
self.state = 201
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,34,self._ctx)
elif token in [QueryParser.BlockComment]:
self.enterOuterAlt(localctx, 2)
self.state = 204
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 203
self.match(QueryParser.BlockComment)
else:
raise NoViableAltException(self)
self.state = 206
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,35,self._ctx)
elif token in [QueryParser.LineComment]:
self.enterOuterAlt(localctx, 3)
self.state = 209
self._errHandler.sync(self)
_alt = 1
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 208
self.match(QueryParser.LineComment)
else:
raise NoViableAltException(self)
self.state = 211
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,36,self._ctx)
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
|
|
from __future__ import unicode_literals
import napalm_yang
import pytest
import json
import os
import sys
import logging
logger = logging.getLogger("napalm-yang")
def config_logging():
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
# config_logging()
BASE_PATH = os.path.dirname(__file__)
test_populating_from_file = [["eos"], ["junos"]]
def read_file_content(filename):
full_path = os.path.join(BASE_PATH, "tutorial_data", filename)
with open(full_path, "r") as f:
return f.read()
def save_file_content(filename, content):
full_path = os.path.join(BASE_PATH, "tutorial_data", filename)
with open(full_path, "w") as f:
return f.write(content)
def read_json(filename):
return json.loads(read_file_content(filename))
class Tests(object):
def test_create_binding(self):
config = napalm_yang.base.Root()
# Adding models to the object
config.add_model(napalm_yang.models.openconfig_interfaces)
config.add_model(napalm_yang.models.openconfig_vlan)
assert (
napalm_yang.utils.model_to_dict(config)
== read_json("test_create_binding.expected")
)
def test_populate_models_programmatically(self):
config = napalm_yang.base.Root()
config.add_model(napalm_yang.models.openconfig_interfaces)
et1 = config.interfaces.interface.add("et1")
et1.config.description = "My description"
et1.config.mtu = 1500
assert et1.config.description == "My description"
assert et1.config.mtu == 1500
config.interfaces.interface.add("et2")
config.interfaces.interface["et2"].config.description = "Another description"
config.interfaces.interface["et2"].config.mtu = 9000
assert (
config.interfaces.interface["et2"].config.description
== "Another description"
)
assert config.interfaces.interface["et2"].config.mtu == 9000
assert (
config.get(filter=True)
== read_json("test_populate_models_programmatically.expected")
)
with pytest.raises(ValueError):
et1.config.mtu = -1
assert_against = [("et1", "My description"), ("et2", "Another description")]
for iface, data in config.interfaces.interface.items():
expected = assert_against.pop(0)
assert iface == expected[0]
assert data.config.description == expected[1]
assert not assert_against, "We didn't iterate over all the interfaces"
assert list(config.interfaces.interface.keys()) == ["et1", "et2"]
config.interfaces.interface.delete("et1")
assert list(config.interfaces.interface.keys()) == ["et2"]
def test_populating_from_a_dict(self):
config = napalm_yang.base.Root()
config.add_model(napalm_yang.models.openconfig_vlan)
vlans_dict = {
"vlans": {
"vlan": {
100: {"config": {"vlan_id": 100, "name": "production"}},
200: {"config": {"vlan_id": 200, "name": "dev"}},
}
}
}
config.load_dict(vlans_dict)
assert sorted(list(config.vlans.vlan.keys())) == [100, 200]
assert config.vlans.vlan[100].config.name == "production"
assert config.vlans.vlan[200].config.name == "dev"
@pytest.mark.parametrize("profile", test_populating_from_file)
def test_populating_from_file(self, profile):
config_path = "test_populating_from_file/{}/config.txt".format(profile[0])
expected_path = "test_populating_from_file/{}/expected.json".format(profile[0])
config = napalm_yang.base.Root()
config.add_model(napalm_yang.models.openconfig_interfaces)
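# parse_config runs the profile's parsers over the native config text and populates the YANG tree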
config.parse_config(native=[read_file_content(config_path)], profile=profile)
# print(json.dumps(config.get(filter=True), indent=4))
assert config.get(filter=True) == read_json(expected_path)
def test_translating_models(self):
candidate = napalm_yang.base.Root()
candidate.add_model(napalm_yang.models.openconfig_interfaces())
def create_iface(candidate, name, description, mtu, prefix, prefix_length):
interface = candidate.interfaces.interface.add(name)
interface.config.description = description
interface.config.mtu = mtu
ip = interface.routed_vlan.ipv4.addresses.address.add(prefix)
ip.config.ip = prefix
ip.config.prefix_length = prefix_length
create_iface(candidate, "et1", "Uplink1", 9000, "192.168.1.1", 24)
create_iface(candidate, "et2", "Uplink2", 9000, "192.168.2.1", 24)
expected_junos = read_file_content("test_translating_models_junos.expected")
expected_eos = read_file_content("test_translating_models_eos.expected")
assert candidate.translate_config(profile=["junos"]) == expected_junos
assert candidate.translate_config(profile=["eos"]) == expected_eos
def test_advanced_manipulation_junos(self):
profile = ["junos"]
config_file = "test_advanced_manipulation/{}/config.txt".format(profile[0])
merge_file = "test_advanced_manipulation/{}/merge.expected".format(profile[0])
replace_file = "test_advanced_manipulation/{}/replace.expected".format(
profile[0]
)
candidate = napalm_yang.base.Root()
candidate.add_model(napalm_yang.models.openconfig_interfaces)
candidate.parse_config(native=[read_file_content(config_file)], profile=profile)
# Now let's make a few changes: remove lo0.0 and create lo0.1
candidate.interfaces.interface["lo0"].subinterfaces.subinterface.delete("0")
lo1 = candidate.interfaces.interface["lo0"].subinterfaces.subinterface.add("1")
lo1.config.description = "new loopback"
# Let's also default the mtu of ge-0/0/0 which is set to 1400
candidate.interfaces.interface["ge-0/0/0"].config._unset_mtu()
# We will also need a running configuration to compare against
running = napalm_yang.base.Root()
running.add_model(napalm_yang.models.openconfig_interfaces)
running.parse_config(native=[read_file_content(config_file)], profile=profile)
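# merge=running emits only what is needed to take the running config to the
# candidate; replace=running (below) emits a full replacement for the touched sections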
merge_config = candidate.translate_config(profile=profile, merge=running)
# save_file_content(merge_file, merge_config)
assert merge_config == read_file_content(merge_file)
replace_config = candidate.translate_config(profile=profile, replace=running)
# save_file_content(replace_file, replace_config)
assert replace_config == read_file_content(replace_file)
def test_advanced_manipulation_eos(self):
profile = ["eos"]
config_file = "test_advanced_manipulation/{}/config.txt".format(profile[0])
merge_file = "test_advanced_manipulation/{}/merge.expected".format(profile[0])
replace_file = "test_advanced_manipulation/{}/replace.expected".format(
profile[0]
)
candidate = napalm_yang.base.Root()
candidate.add_model(napalm_yang.models.openconfig_interfaces)
candidate.parse_config(native=[read_file_content(config_file)], profile=profile)
# Now let's make a few changes: remove Loopback1 and create Loopback0
candidate.interfaces.interface.delete("Loopback1")
lo0 = candidate.interfaces.interface.add("Loopback0")
lo0.config.description = "new loopback"
# Let's also default the mtu of Port-Channel1, which is set in the parsed config
candidate.interfaces.interface["Port-Channel1"].config._unset_mtu()
# We will also need a running configuration to compare against
running = napalm_yang.base.Root()
running.add_model(napalm_yang.models.openconfig_interfaces)
running.parse_config(native=[read_file_content(config_file)], profile=profile)
merge_config = candidate.translate_config(profile=profile, merge=running)
# save_file_content(merge_file, merge_config)
assert merge_config == read_file_content(merge_file)
replace_config = candidate.translate_config(profile=profile, replace=running)
# save_file_content(replace_file, replace_config)
assert replace_config == read_file_content(replace_file)
def test_diffing_objects(self):
profile = ["eos"]
config_file = "test_diffing_objects/config.txt"
expected_file = "test_diffing_objects/expected.json"
candidate = napalm_yang.base.Root()
candidate.add_model(napalm_yang.models.openconfig_interfaces)
candidate.parse_config(native=[read_file_content(config_file)], profile=profile)
# Now let's make a few changes: remove Loopback1 and create Loopback0
candidate.interfaces.interface.delete("Loopback1")
lo0 = candidate.interfaces.interface.add("Loopback0")
lo0.config.description = "new loopback"
# Let's also default the mtu of Port-Channel1, which is set in the parsed config
candidate.interfaces.interface["Port-Channel1"].config._unset_mtu()
# We will also need a running configuration to compare against
running = napalm_yang.base.Root()
running.add_model(napalm_yang.models.openconfig_interfaces)
running.parse_config(native=[read_file_content(config_file)], profile=profile)
assert napalm_yang.utils.diff(candidate, running) == read_json(expected_file)
|
|
import random
import pytest
import nlp
from nlp import loadPageHTML, stripRawHTML, findOutlinks, onlyWikipediaURLS
from nlp import expand_pages, relevant_pages, normalize, ConvergenceDetector, getInLinks
from nlp import getOutLinks, Page, determineInlinks, HITS
from nlp import Rules, Lexicon, Grammar, ProbRules, ProbLexicon, ProbGrammar
from nlp import Chart, CYK_parse
# Clumsy imports because we need to access certain nlp.py globals explicitly:
# they are read by functions within nlp.py, so we set them on the module below.
from unittest.mock import patch
from io import BytesIO
random.seed("aima-python")
def test_rules():
check = {'A': [['B', 'C'], ['D', 'E']], 'B': [['E'], ['a'], ['b', 'c']]}
assert Rules(A="B C | D E", B="E | a | b c") == check
def test_lexicon():
check = {'Article': ['the', 'a', 'an'], 'Pronoun': ['i', 'you', 'he']}
lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he")
assert lexicon == check
def test_grammar():
rules = Rules(A="B C | D E", B="E | a | b c")
lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he")
grammar = Grammar("Simplegram", rules, lexicon)
assert grammar.rewrites_for('A') == [['B', 'C'], ['D', 'E']]
assert grammar.isa('the', 'Article')
grammar = nlp.E_Chomsky
for rule in grammar.cnf_rules():
assert len(rule) == 3
def test_generation():
lexicon = Lexicon(Article="the | a | an",
Pronoun="i | you | he")
rules = Rules(
S="Article | More | Pronoun",
More="Article Pronoun | Pronoun Pronoun"
)
grammar = Grammar("Simplegram", rules, lexicon)
sentence = grammar.generate_random('S')
for token in sentence.split():
found = False
for non_terminal, terminals in grammar.lexicon.items():
if token in terminals:
found = True
assert found
def test_prob_rules():
check = {'A': [(['B', 'C'], 0.3), (['D', 'E'], 0.7)],
'B': [(['E'], 0.1), (['a'], 0.2), (['b', 'c'], 0.7)]}
rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]")
assert rules == check
def test_prob_lexicon():
check = {'Article': [('the', 0.5), ('a', 0.25), ('an', 0.25)],
'Pronoun': [('i', 0.4), ('you', 0.3), ('he', 0.3)]}
lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]",
Pronoun="i [0.4] | you [0.3] | he [0.3]")
assert lexicon == check
def test_prob_grammar():
rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]")
lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]",
Pronoun="i [0.4] | you [0.3] | he [0.3]")
grammar = ProbGrammar("Simplegram", rules, lexicon)
assert grammar.rewrites_for('A') == [(['B', 'C'], 0.3), (['D', 'E'], 0.7)]
assert grammar.isa('the', 'Article')
grammar = nlp.E_Prob_Chomsky
for rule in grammar.cnf_rules():
assert len(rule) == 4
def test_prob_generation():
lexicon = ProbLexicon(Verb="am [0.5] | are [0.25] | is [0.25]",
Pronoun="i [0.4] | you [0.3] | he [0.3]")
rules = ProbRules(
S="Verb [0.5] | More [0.3] | Pronoun [0.1] | nobody is here [0.1]",
More="Pronoun Verb [0.7] | Pronoun Pronoun [0.3]"
)
grammar = ProbGrammar("Simplegram", rules, lexicon)
sentence = grammar.generate_random('S')
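# ProbGrammar.generate_random returns a (sentence, probability) pair, hence length 2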
assert len(sentence) == 2
def test_chart_parsing():
chart = Chart(nlp.E0)
parses = chart.parses('the stench is in 2 2')
assert len(parses) == 1
def test_CYK_parse():
grammar = nlp.E_Prob_Chomsky
words = ['the', 'robot', 'is', 'good']
P = CYK_parse(words, grammar)
assert len(P) == 52
grammar = nlp.E_Prob_Chomsky_
words = ['astronomers', 'saw', 'stars']
P = CYK_parse(words, grammar)
assert len(P) == 32
# ______________________________________________________________________________
# Data Setup
testHTML = """Keyword String 1: A man is a male human.
Keyword String 2: Like most other male mammals, a man inherits an
X from his mom and a Y from his dad.
Links:
href="https://google.com.au"
< href="/wiki/TestThing" > href="/wiki/TestBoy"
href="/wiki/TestLiving" href="/wiki/TestMan" >"""
testHTML2 = "a mom and a dad"
testHTML3 = """
<!DOCTYPE html>
<html>
<head>
<title>Page Title</title>
</head>
<body>
<p>AIMA book</p>
</body>
</html>
"""
pA = Page("A", ["B", "C", "E"], ["D"], 1, 6)
pB = Page("B", ["E"], ["A", "C", "D"], 2, 5)
pC = Page("C", ["B", "E"], ["A", "D"], 3, 4)
pD = Page("D", ["A", "B", "C", "E"], [], 4, 3)
pE = Page("E", [], ["A", "B", "C", "D", "F"], 5, 2)
pF = Page("F", ["E"], [], 6, 1)
pageDict = {pA.address: pA, pB.address: pB, pC.address: pC,
pD.address: pD, pE.address: pE, pF.address: pF}
nlp.pagesIndex = pageDict
nlp.pagesContent = {pA.address: testHTML, pB.address: testHTML2,
pC.address: testHTML, pD.address: testHTML2,
pE.address: testHTML, pF.address: testHTML2}
# This test takes a long time (> 60 secs)
# def test_loadPageHTML():
# # first format all the relative URLs with the base URL
# addresses = [examplePagesSet[0] + x for x in examplePagesSet[1:]]
# loadedPages = loadPageHTML(addresses)
# relURLs = ['Ancient_Greek','Ethics','Plato','Theology']
# fullURLs = ["https://en.wikipedia.org/wiki/"+x for x in relURLs]
# assert all(x in loadedPages for x in fullURLs)
# assert all(loadedPages.get(key,"") != "" for key in addresses)
@patch('urllib.request.urlopen', return_value=BytesIO(testHTML3.encode()))
def test_stripRawHTML(html_mock):
addr = "https://en.wikipedia.org/wiki/Ethics"
aPage = loadPageHTML([addr])
someHTML = aPage[addr]
strippedHTML = stripRawHTML(someHTML)
assert "<head>" not in strippedHTML and "</head>" not in strippedHTML
assert "AIMA book" in someHTML and "AIMA book" in strippedHTML
def test_determineInlinks():
assert set(determineInlinks(pA)) == set(['B', 'C', 'E'])
assert set(determineInlinks(pE)) == set([])
assert set(determineInlinks(pF)) == set(['E'])
def test_findOutlinks_wiki():
testPage = pageDict[pA.address]
outlinks = findOutlinks(testPage, handleURLs=onlyWikipediaURLS)
assert "https://en.wikipedia.org/wiki/TestThing" in outlinks
assert "https://en.wikipedia.org/wiki/TestThing" in outlinks
assert "https://google.com.au" not in outlinks
# ______________________________________________________________________________
# HITS Helper Functions
def test_expand_pages():
pages = {k: pageDict[k] for k in ('F',)}
pagesTwo = {k: pageDict[k] for k in ('A', 'E')}
expanded_pages = expand_pages(pages)
assert all(x in expanded_pages for x in ['F', 'E'])
assert all(x not in expanded_pages for x in ['A', 'B', 'C', 'D'])
expanded_pages = expand_pages(pagesTwo)
print(expanded_pages)
assert all(x in expanded_pages for x in ['A', 'B', 'C', 'D', 'E', 'F'])
def test_relevant_pages():
pages = relevant_pages("his dad")
assert all((x in pages) for x in ['A', 'C', 'E'])
assert all((x not in pages) for x in ['B', 'D', 'F'])
pages = relevant_pages("mom and dad")
assert all((x in pages) for x in ['A', 'B', 'C', 'D', 'E', 'F'])
pages = relevant_pages("philosophy")
assert all((x not in pages) for x in ['A', 'B', 'C', 'D', 'E', 'F'])
def test_normalize():
normalize(pageDict)
print([page.hub for addr, page in nlp.pagesIndex.items()])
expected_hub = [1 / 91 ** 0.5, 2 / 91 ** 0.5, 3 / 91 ** 0.5, 4 / 91 ** 0.5, 5 / 91 ** 0.5,
6 / 91 ** 0.5] # Works only for sample data above
expected_auth = list(reversed(expected_hub))
assert len(expected_hub) == len(expected_auth) == len(nlp.pagesIndex)
assert expected_hub == [page.hub for addr, page in sorted(nlp.pagesIndex.items())]
assert expected_auth == [page.authority for addr, page in sorted(nlp.pagesIndex.items())]
def test_detectConvergence():
# run the detector once to initialise history
convergence = ConvergenceDetector()
convergence()
assert convergence() # values haven't changed so should return True
# make tiny increase/decrease to all values
for _, page in nlp.pagesIndex.items():
page.hub += 0.0003
page.authority += 0.0004
# retest function with values. Should still return True
assert convergence()
for _, page in nlp.pagesIndex.items():
page.hub += 3000000
page.authority += 3000000
# retest function with values. Should now return false
assert not convergence()
def test_getInlinks():
inlnks = getInLinks(pageDict['A'])
assert sorted(inlnks) == pageDict['A'].inlinks
def test_getOutlinks():
outlnks = getOutLinks(pageDict['A'])
assert sorted(outlnks) == pageDict['A'].outlinks
def test_HITS():
HITS('inherit')
auth_list = [pA.authority, pB.authority, pC.authority, pD.authority, pE.authority, pF.authority]
hub_list = [pA.hub, pB.hub, pC.hub, pD.hub, pE.hub, pF.hub]
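# D has the most inlinks of any page, so it should win on authority;
# E links out to almost every page, so it should win on hub score.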
assert max(auth_list) == pD.authority
assert max(hub_list) == pE.hub
if __name__ == '__main__':
pytest.main()
|
|
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import re, sys, threading, time, subprocess, os, atexit
import random
from anki.hooks import addHook
from anki.utils import tmpdir, isWin, isMac
# Shared utils
##########################################################################
_soundReg = "\[sound:(.*?)\]"
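# matches embedded [sound:filename] tags in card text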
def playFromText(text):
for match in re.findall(_soundReg, text):
play(match)
def stripSounds(text):
return re.sub(_soundReg, "", text)
def hasSound(text):
return re.search(_soundReg, text) is not None
##########################################################################
processingSrc = u"rec.wav"
processingDst = u"rec.mp3"
recFiles = []
processingChain = [
["lame", "rec.wav", processingDst, "--noreplaygain", "--quiet"],
]
# don't show box on windows
if isWin:
si = subprocess.STARTUPINFO()
try:
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except:
# pre-2.7: the flag lives in the private _subprocess module
si.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
else:
si = None
if isMac:
# make sure lame, which is installed in /usr/local/bin, is in the path
os.environ['PATH'] += ":" + "/usr/local/bin"
dir = os.path.dirname(os.path.abspath(__file__))
dir = os.path.abspath(dir + "/../../../..")
os.environ['PATH'] += ":" + dir + "/audio"
def retryWait(proc):
# osx throws interrupted system call errors frequently
while 1:
try:
return proc.wait()
except OSError:
continue
# Mplayer settings
##########################################################################
if isWin:
mplayerCmd = ["mplayer.exe", "-ao", "win32"]
dir = os.path.dirname(os.path.abspath(sys.argv[0]))
os.environ['PATH'] += ";" + dir
os.environ['PATH'] += ";" + dir + "\\..\\win\\top" # for testing
else:
mplayerCmd = ["mplayer"]
mplayerCmd += ["-really-quiet", "-noautosub"]
# Mplayer in slave mode
##########################################################################
mplayerQueue = []
mplayerManager = None
mplayerReader = None
mplayerEvt = threading.Event()
mplayerClear = False
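# Producer/consumer: callers append file paths to mplayerQueue and set mplayerEvt;
# the monitor thread below drains the queue and feeds mplayer's slave-mode stdin.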
class MplayerMonitor(threading.Thread):
def run(self):
global mplayerClear
self.mplayer = None
self.deadPlayers = []
while 1:
mplayerEvt.wait()
mplayerEvt.clear()
# clearing queue?
if mplayerClear and self.mplayer:
try:
self.mplayer.stdin.write("stop\n")
except:
# mplayer quit by user (likely video)
self.deadPlayers.append(self.mplayer)
self.mplayer = None
# loop through files to play
while mplayerQueue:
# ensure started
if not self.mplayer:
self.startProcess()
# pop a file
try:
item = mplayerQueue.pop(0)
except IndexError:
# queue was cleared by main thread
continue
if mplayerClear:
mplayerClear = False
extra = ""
else:
extra = " 1"
cmd = 'loadfile "%s"%s\n' % (item, extra)
try:
self.mplayer.stdin.write(cmd)
except:
# mplayer has quit and needs restarting
self.deadPlayers.append(self.mplayer)
self.mplayer = None
self.startProcess()
self.mplayer.stdin.write(cmd)
# if we feed mplayer too fast it loses files
time.sleep(1)
# wait() on finished processes. we don't want to block on the
# wait, so we keep trying each time we're reactivated
def clean(pl):
if pl.poll() is not None:
pl.wait()
return False
else:
return True
self.deadPlayers = [pl for pl in self.deadPlayers if clean(pl)]
def kill(self):
if not self.mplayer:
return
try:
self.mplayer.stdin.write("quit\n")
self.deadPlayers.append(self.mplayer)
except:
pass
self.mplayer = None
def startProcess(self):
try:
cmd = mplayerCmd + ["-slave", "-idle"]
devnull = open(os.devnull, "w")
self.mplayer = subprocess.Popen(
cmd, startupinfo=si, stdin=subprocess.PIPE,
stdout=devnull, stderr=devnull)
except OSError:
mplayerEvt.clear()
raise Exception("Did you install mplayer?")
def queueMplayer(path):
ensureMplayerThreads()
if isWin and os.path.exists(path):
# mplayer on windows doesn't like the encoding, so we create a
# temporary file instead. oddly, foreign characters in the dirname
# don't seem to matter.
dir = tmpdir()
name = os.path.join(dir, "audio%s%s" % (
random.randrange(0, 1000000), os.path.splitext(path)[1]))
f = open(name, "wb")
f.write(open(path, "rb").read())
f.close()
# it wants unix paths, too!
path = name.replace("\\", "/")
path = path.encode(sys.getfilesystemencoding())
else:
path = path.encode("utf-8")
mplayerQueue.append(path)
mplayerEvt.set()
def clearMplayerQueue():
global mplayerClear, mplayerQueue
mplayerQueue = []
mplayerClear = True
mplayerEvt.set()
def ensureMplayerThreads():
global mplayerManager
if not mplayerManager:
mplayerManager = MplayerMonitor()
mplayerManager.daemon = True
mplayerManager.start()
# ensure the tmpdir() exit handler is registered first so it runs
# after the mplayer exit
tmpdir()
# clean up mplayer on exit
atexit.register(stopMplayer)
def stopMplayer(*args):
if not mplayerManager:
return
mplayerManager.kill()
addHook("unloadProfile", stopMplayer)
# PyAudio recording
##########################################################################
try:
import pyaudio
import wave
PYAU_FORMAT = pyaudio.paInt16
PYAU_CHANNELS = 1
PYAU_INPUT_INDEX = None
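# 16-bit mono capture; an input index of None lets PyAudio pick the default input device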
except:
pass
class _Recorder(object):
def postprocess(self, encode=True):
self.encode = encode
for c in processingChain:
#print c
if not self.encode and c[0] == 'lame':
continue
try:
ret = retryWait(subprocess.Popen(c, startupinfo=si))
except:
ret = True
if ret:
raise Exception(_(
"Error running %s") %
u" ".join(c))
class PyAudioThreadedRecorder(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.finish = False
def run(self):
chunk = 1024
try:
p = pyaudio.PyAudio()
except NameError:
raise Exception(
"Pyaudio not installed (recording not supported on OSX10.3)")
rate = int(p.get_default_input_device_info()['defaultSampleRate'])
stream = p.open(format=PYAU_FORMAT,
channels=PYAU_CHANNELS,
rate=rate,
input=True,
input_device_index=PYAU_INPUT_INDEX,
frames_per_buffer=chunk)
frames = []  # don't shadow the all() builtin
while not self.finish:
try:
data = stream.read(chunk)
except IOError, e:
if e[1] == pyaudio.paInputOverflowed:
data = None
else:
raise
if data:
frames.append(data)
stream.close()
p.terminate()
data = ''.join(frames)
wf = wave.open(processingSrc, 'wb')
wf.setnchannels(PYAU_CHANNELS)
wf.setsampwidth(p.get_sample_size(PYAU_FORMAT))
wf.setframerate(rate)
wf.writeframes(data)
wf.close()
class PyAudioRecorder(_Recorder):
def __init__(self):
for t in recFiles + [processingSrc, processingDst]:
try:
os.unlink(t)
except OSError:
pass
self.encode = False
def start(self):
self.thread = PyAudioThreadedRecorder()
self.thread.start()
def stop(self):
self.thread.finish = True
self.thread.join()
def file(self):
if self.encode:
tgt = u"rec%d.mp3" % time.time()
os.rename(processingDst, tgt)
return tgt
else:
return processingSrc
# Audio interface
##########################################################################
_player = queueMplayer
_queueEraser = clearMplayerQueue
def play(path):
_player(path)
def clearAudioQueue():
_queueEraser()
Recorder = PyAudioRecorder
|
|
### readjobsplot_testfede.py
#Modified by F.Fanzago
###
import sys
from ROOT import *
import os
import math
from tdrStyle import *
if (len(sys.argv) != 8):
print "Usage: python readjobsplot_testfede.py test_files test_duration size_of_time_bins test_start_time RTT site_name target_time"
if (len(sys.argv) > 1):
name = sys.argv[1]
nn = name.find("readfile")
print "READING TEST RESULT FAILED not enough parameters for plots", name[0:nn-1]
sys.exit(1)
tdrstyle = setTDRStyle()
tdrStyle.SetPadRightMargin(0.05)
gStyle.SetOptStat(0)
gStyle.SetOptTitle(1)
gStyle.SetTitleX(0.5) # Trick to center histogram title
gStyle.SetTitleAlign(23) # Trick to center histogram title
texbox = TLatex(0.60, 0.90, "Expected rate: 2.5 MB / 10 s")
texbox.SetNDC()
texbox.SetTextAlign(12) # Left-adjusted
texbox.SetTextFont(42)
texbox.SetTextSize(0.03)
texbox.SetLineWidth(2)
print "Opening list of test files..." + sys.argv[1]
#test_files = open(sys.argv[1])
test_length = float(sys.argv[2]) + 1
bin_size = float(sys.argv[3])
nbins = int(test_length/bin_size) + 1
overall_start_time = float(sys.argv[4])
rtt = float(sys.argv[5])
sitename = str(sys.argv[6])
target_time = float(sys.argv[7]) # Time in s to read 1 MB
rttmsg = "RTT %g s (not included)" % rtt
texboxrtt = TLatex(0.20, 0.88, rttmsg)
texboxrtt.SetNDC()
texboxrtt.SetTextAlign(12) # Left-adjusted
texboxrtt.SetTextFont(42)
texboxrtt.SetTextSize(0.027)
texboxrtt.SetLineWidth(2)
# setup interval mapping structure
intervals = {}
for i in range(nbins):
intervals[str(i)] = [ i*bin_size, (i+1)*bin_size ]
#print "printing dictionary 'intervals'..."
#print intervals
print "Assuming test of length %f seconds, bin size %f seconds, yielding %f bins" % (int(test_length), int(bin_size), nbins)
print "Further assuming that the test began at time %f seconds" % overall_start_time
hist_active_jobs = TH1F("hist_active_jobs", "concurrent active jobs as a function of time", nbins, 0, test_length)
hist_dataread = TH1F("hist_dataread", "data read as a function of time", nbins, 0, test_length)
hist_job_successes = TH1F("hist_job_successes", "concurrent job successes as a function of time", nbins, 0, test_length)
hist_opentimes = TH1F("hist_opentimes", "time spent reading as a function of time", nbins, 0, test_length)
hist_sleeptimes = TH1F("hist_sleeptimes", "time jobs sleep as a function of time", nbins, 0, test_length)
hist_opentimes.Sumw2()
hist_dataread.Sumw2()
hist_sleeptimes.Sumw2()
#hist_active_jobs.SetMaximum(1)
test_files = open(sys.argv[1])
num_lines = 0
filelines = []
for line in test_files:
num_lines = num_lines + 1
filelines.append(line)
# num_lines = 701
print 'numln = ', num_lines
binjoblist = []
for job_num in xrange(num_lines):
binjoblist.append([])
for bin_num in xrange(nbins + 1):
binjoblist[job_num].append(0)
# print 'bjl ', binjoblist
job_num = 0
# for filename in test_files:
for filename in filelines:
filename = filename.rstrip('\n')
# print "opening file %s ..." % filename
    infile = open(filename)
job_in_bin = 0
    first_read_time = -1.0  # sentinel: not yet set by the first read
    for line in infile:
if ("duration" in line):
# print line
results = line.split()
            if (len(results) < 11): continue
# start_str = str(results[2])
start_time = float(results[2]) - overall_start_time
start_time_frac = float(results[3])
start_time = start_time + start_time_frac
# start_time = int(start_str[:-1]) - overall_start_time
data_read = float(results[9]) / 1024.0 # in KB
if (results[10] != "kB" and target_time < 100):
data_read = data_read / 1024.0 # in MB
            run_time = float(results[5])  # time to read the 2.5 MB block
if (first_read_time == -1.0):
if (run_time < bin_size and start_time < run_time):
first_read_time = run_time
else:
first_read_time = 0.0
# Increment all times to account for 1st read
start_time = start_time + first_read_time
if (rtt < run_time):
run_time = run_time - rtt
# print start_time, data_read, run_time
hist_job_successes.Fill(start_time)
hist_opentimes.Fill(start_time, run_time)
hist_dataread.Fill(start_time, data_read)
bin_num = hist_active_jobs.FindBin(start_time)
if (bin_num > nbins):
print 'bad bin, start time ', bin_num, start_time
elif (binjoblist[job_num][bin_num] == 0):
# fill each time interval at most once per job
hist_active_jobs.Fill(start_time)
binjoblist[job_num][bin_num] = 1
elif ("Sleeping" in line):
results = line.split()
            if (len(results) < 8): continue
start_time = float(results[2]) - overall_start_time
start_time_frac = float(results[3])
start_time = start_time + start_time_frac
# Increment all times to account for 1st read
start_time = start_time + first_read_time
run_time = float(results[7])
hist_sleeptimes.Fill(start_time, run_time)
job_num = job_num + 1
    infile.close()
test_files.close()
# print 'bjl ', binjoblist
#fede--------------------------------
max_clients=hist_active_jobs.GetMaximum()
#print "max_clients", max_clients
#print "expected reading rate n_clients * 0.25 MB/s", max_clients / target_time
#fede-----------------------------------
# make plot of success/failure rate vs # clients running concurrently
n_clients = TVectorF()
sf_rate = TVectorF()
graph1 = TGraphErrors()
graph2 = TGraphErrors() # time vs. failure rate
graph3b = TGraphErrors()
graph3 = TGraphErrors() # number of concurrent clients vs. average runtime
graph4 = TGraphErrors() # avg runtime vs. time
hist_readrate = hist_dataread.Clone()
hist_datasave = hist_dataread.Clone("datasave")
hist_readrate.Divide(hist_opentimes)
print 'read integral = ', hist_dataread.Integral(0, nbins - 1)
hist_dataread.Scale(1.0/bin_size) # Get total rate: total data read (blocks in MB) / bin length
hist_totrate = hist_dataread.Clone()
hist_dataread.Divide(hist_active_jobs) # Get rate / job (rate per client)
timeval = target_time
if (target_time > 100):
timeval = timeval / 1024 # Convert to kB
hist_dataread.Scale(timeval) # Get % of attempted rate
################################################################################################
# hist_totrate is the total bytes read, scaled by the bin length
# hist_dataread is the total bytes read, scaled by the bin length and by the number of clients
# hist_readrate is the total bytes read, scaled by the total open time
################################################################################################
hist_sleeptimes.Add(hist_opentimes) # Get total job time
hist_iotimes = hist_opentimes.Clone()
hist_iotimes.Divide(hist_sleeptimes)  # Get fraction of time spent doing I/O
hist_timepread = hist_opentimes.Clone()
hist_timepread.Divide(hist_job_successes)  # Get avg. read time
#fede ------------------
max_obtained_totalreadrate = 0.0
observed_totalreadrate_with_max_clients = 0.0
clients_with_max_obtained_totalreadrate = 0
#fede ------------------
for i in range(nbins - 1): # Omit last bin because it's incomplete
#print "bin = ", i
n_clients = hist_active_jobs.GetBinContent(i+1)
#print "n_clients = ", n_clients
s = hist_job_successes.GetBinContent(i+1)
#print "success = ", s
totaldataread = hist_datasave.GetBinContent(i+1)
#print "totaldataread originale ", totaldataread
totalrate = hist_totrate.GetBinContent(i+1)
#print "totalrate: totaldataread / lunghezza del bin = ", totalrate
totrateErr = hist_totrate.GetBinError(i+1)
#print "totrateErr = ", totrateErr
opentime = hist_opentimes.GetBinContent(i+1)
#print "opentime tempo totale apertura = ", opentime
rate = hist_readrate.GetBinContent(i+1)
#print "rate = byte totali scalati sul tempo totale di apertura", rate
rateErr = hist_readrate.GetBinError(i+1)
#print "rateErr = ", rateErr
percrate = hist_dataread.GetBinContent(i+1)
#print "percrate = byte totali scalati sulla lunghezza del bin, sul numero di client e sui secondi teorici per leggere 1 MB (4)", percrate
percrateErr = hist_dataread.GetBinError(i+1)
#print "percrateErr = ", percrateErr
iotime = hist_iotimes.GetBinContent(i+1)
#print "iotime = ", iotime
iotimeErr = hist_iotimes.GetBinError(i+1)
#print "iotimeErr = ", iotimeErr
readtime = hist_timepread.GetBinContent(i+1)
#print "readtime = total open time / success", readtime
readtimeErr = hist_timepread.GetBinError(i+1)
#print "readtimeErr ", readtimeErr
run_times_combined = hist_opentimes.GetBinContent(i+1)
#print "run_times_combined", run_times_combined
if (s == 0): continue
run_times_error = hist_opentimes.GetBinError(i+1)
binNum = graph1.GetN()
graph1.SetPoint(binNum, n_clients, iotime)
graph1.SetPointError(binNum, 0.0, iotimeErr)
binNum = graph3.GetN()
graph3.SetPoint(binNum, n_clients, rate)
graph3.SetPointError(binNum, 0.0, rateErr)
binNum = graph3b.GetN()
graph3b.SetPoint(binNum, n_clients, readtime)
graph3b.SetPointError(binNum, 0.0, readtimeErr)
binNum = graph2.GetN()
graph2.SetPoint(binNum, n_clients, totalrate)
print 'njobs, totalrate ', n_clients, totalrate
if (n_clients == max_clients):
#print "fede -------"
#print "real total rate with maximum clients = ", totalrate
observed_totalreadrate_with_max_clients = totalrate
#print "fede -------"
if (totalrate > max_obtained_totalreadrate):
max_obtained_totalreadrate = totalrate
clients_with_max_obtained_totalreadrate = n_clients
graph2.SetPointError(binNum, 0.0, totrateErr)
binNum = graph4.GetN()
graph4.SetPoint(binNum, n_clients, percrate)
graph4.SetPointError(binNum, 0.0, percrateErr)
print 'njobs, avg time ', n_clients, run_times_combined/(s)
# graph4.SetPoint(graph4.GetN(), i*bin_size, performance_measure )
name = sys.argv[1]
nn = name.find("readfile")
#print name[0:nn]
summary_name=name[0:nn]+"summary"
#print "summary_name = ", summary_name
#summary_file="plots/"+summary_name
# fede-----------------------------------------------
print "------------------------------------------------"
print "------------------------------------------------"
#print "SUMMARY:", sys.argv[6]
print "READING TEST SUMMARY:", summary_name
print "max_obtained_totalreadrate = ", max_obtained_totalreadrate
print "with clients = ", clients_with_max_obtained_totalreadrate
print "expected total read rate with this number of clients = ", clients_with_max_obtained_totalreadrate / target_time
print "------------------------------------------------"
print "max_clients ", max_clients
print "observed total read rate with max_clients ", observed_totalreadrate_with_max_clients
print "expected total read rate with the max number of clients = ", max_clients / target_time
print "------------------------------------------------"
#
if (max_obtained_totalreadrate < 150 and max_clients < 600):
    print "READING TEST RESULT PROBLEM max obtained totalreadrate lower than 150 MB/s and max_clients < 600 ", max_obtained_totalreadrate, max_clients, name[0:nn-1]
elif (max_clients >= 600 and max_obtained_totalreadrate < 150):
    print "READING TEST RESULT WARNING max obtained totalreadrate lower than 150 MB/s ", max_obtained_totalreadrate, name[0:nn-1]
else:
print "READING TEST RESULT OK ", name[0:nn-1]
print "------------------------------------------------"
print "------------------------------------------------"
# fede-----------------------------------------------
binNum = graph2.GetN()
graph2.SetPoint(binNum, 0, 0)
c1 = TCanvas("c1", "c1")
c1.SetGridy()
#graph.Draw()
outfilebase = sys.argv[1][:-4]
ofname = outfilebase + ".root"
print "ofname: %s" % ofname
output_file = TFile(ofname, "RECREATE")
#hist_active_jobs.Draw()
# graph2.SetMarkerStyle(8) # big dot
rate_units = "[MB/s]"
if (target_time > 100):
rate_units = "[kB/s]"
graph3.SetMarkerStyle(8) # big dot
graph3.SetTitle(sitename)
# graph3.GetXaxis().SetTitle("Expected file-open rate (Hz)")
graph3.GetXaxis().SetTitle("# of jobs")
# graph3.GetXaxis().SetTitle("Total attempted read rate [MB/s]")
# graph3.GetYaxis().SetTitle("Observed file-open rate (Hz)")
graph3.GetYaxis().SetTitle("Avg. read rate per block " + rate_units)
# graph3.GetYaxis().SetRangeUser(0, 250)
# graph3.GetXaxis().SetRangeUser(0, 250)
graph3.Draw("APZ")
c1.SaveAs("plots/" + outfilebase + "_rate_vs_jobs.png")
graph3b.SetMarkerStyle(8) # big dot
graph3b.SetTitle(sitename)
graph3b.GetXaxis().SetTitle("# of jobs")
# graph3b.GetYaxis().SetRangeUser(0, 30.0)
if num_lines > 1500:
labsiz = 0.03
else:
labsiz = 0.04
graph3b.GetXaxis().SetLabelSize(labsiz)
graph3b.GetYaxis().SetLabelSize(labsiz)
graph3b.GetYaxis().SetTitle("Avg. read time per block [s]")
graph3b.GetYaxis().SetTitleOffset(1.35)
graph3b.Draw("APZ")
texboxrtt.Draw("same")
c1.SaveAs("plots/" + outfilebase + "_time_vs_jobs.png")
graph4.SetMarkerStyle(8) # big dot
graph4.SetTitle(sitename)
graph4.GetXaxis().SetTitle("# of jobs")
# graph3.GetYaxis().SetTitle("Observed file-open rate (Hz)")
graph4.GetYaxis().SetTitle("Overall read rate / expected rate")
# graph3.GetYaxis().SetRangeUser(0, 250)
# graph3.GetXaxis().SetRangeUser(0, 250)
graph4.Draw("APZ")
texbox.Draw("same")
# c1.SaveAs("plots/" + outfilebase + "_percrate_vs_jobs.png")
hist_readrate.SetMarkerStyle(8) # big dot
hist_readrate.SetTitle(sitename)
hist_readrate.GetXaxis().SetTitle("Time [s]")
hist_readrate.GetXaxis().SetLabelSize(labsiz)
hist_readrate.GetYaxis().SetLabelSize(labsiz)
hist_readrate.GetYaxis().SetTitle("Avg. rate / read " + rate_units)
# hist_readrate.GetYaxis().SetTitleOffset(1.4)
hist_readrate.Draw("ep")
#os.system("sleep 5")
c1.SaveAs("plots/" + outfilebase + "_rate_vs_time.png")
hist_dataread.SetMarkerStyle(8) # big dot
hist_dataread.SetTitle(sitename)
hist_dataread.GetXaxis().SetTitle("Time [s]")
hist_dataread.GetXaxis().SetLabelSize(0.03)
hist_dataread.GetYaxis().SetLabelSize(0.03)
hist_dataread.GetYaxis().SetTitle("Overall read rate / expected rate")
hist_dataread.Draw("ep")
c1.SaveAs("plots/" + outfilebase + "_percent_vs_time.png")
hist_iotimes.SetMarkerStyle(8) # big dot
hist_iotimes.SetTitle(sitename)
hist_iotimes.GetXaxis().SetTitle("Time [s]")
hist_iotimes.GetXaxis().SetLabelSize(0.03)
hist_iotimes.GetYaxis().SetLabelSize(0.03)
hist_iotimes.GetYaxis().SetTitle("Fraction of time waiting for I/O")
hist_iotimes.Draw("ep")
c1.SaveAs("plots/" + outfilebase + "_io_vs_time.png")
graph1.SetMarkerStyle(8) # big dot
graph1.SetTitle(sitename)
graph1.GetXaxis().SetTitle("# of jobs")
graph1.GetXaxis().SetLabelSize(labsiz)
graph1.GetYaxis().SetLabelSize(labsiz)
graph1.GetYaxis().SetTitle("Fraction of time waiting for I/O")
graph1.Draw("APZ")
c1.SaveAs("plots/" + outfilebase + "_io_vs_jobs.png")
# graph2hist = TH2F("test histo", "test histo2", 20, 0.0, 820.0, 20, 0.0, 210.0)
#graph2hist.GetXaxis().SetRangeUser(0.0, 820.0)
# graph2.GetXaxis().SetRangeUser(0.0, 820.0)
# graph2hist.SetTitle(sitename)
# graph2hist.GetXaxis().SetTitle("# of jobs")
# graph2hist.GetYaxis().SetTitle("Total read rate [MB/s]")
# graph2hist.Draw()
# graph2.GetYaxis().SetTitleOffset(1.1)
# graph2.GetYaxis().SetRangeUser(0, 500)
# graph2.Draw("PZ same")
# TGaxis.SetMaxDigits(3) # Puts x 10^3 by axis
graph2.SetTitle(sitename)
graph2.GetXaxis().SetTitle("# of jobs")
graph2.GetYaxis().SetTitle("Total read rate " + rate_units)
graph2.GetXaxis().SetLabelSize(labsiz)
graph2.GetYaxis().SetLabelSize(labsiz)
graph2.Draw("APZ")
# xmax = 820.0
# graphmax = 210.0
# xmax = 1640.0
# graphmax = 410.0
xmax = graph2.GetXaxis().GetXmax()
xmin = graph2.GetXaxis().GetXmin()
#print "---- > xmax = ", xmax
#print "---- > xmin = ", xmin
graphmax = graph2.GetHistogram().GetMaximum()
graphmin = graph2.GetHistogram().GetMinimum()
print 'graphmax max ', graphmax, xmax
print 'graphmin min ', graphmin, xmin
# if graphmax > 250:
# graphmax = 250
if graphmax < xmax / timeval:
xmax = graphmax * timeval
else:
graphmax = xmax / timeval
print 'maxes ', xmax, graphmax
evenline = TLine(0.0, 0.0, xmax, graphmax)
evenline.SetLineColor(8)
evenline.SetLineWidth(2)
evenline.SetLineStyle(1)
evenline.Draw("same")
c1.SaveAs("plots/" + outfilebase + "_read_vs_jobs.png")
#os.system("sleep 3")
#hist_job_successes.Draw()
#os.system("sleep 3")
#hist_readrate.Draw()
#os.system("sleep 3")
#c1.SaveAs("canvas.root")
hist_active_jobs.Write()
hist_job_successes.Write()
hist_readrate.Write()
hist_datasave.Write()
hist_opentimes.Write()
hist_sleeptimes.Write()
hist_totrate.Write()
hist_timepread.Write()
# graph1.Write("nClients_vs_rate")
graph2.Write("readvsjobs")
# graph3.Write("exprate_vs_performance")
# graph3_b.Write("nClients_vs_avgruntime")
graph4.Write("percrate_vs_jobs")
#output_file.Write()
c1.Close()
output_file.Close()
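# Illustrative invocation (argument values are hypothetical):
#   python readjobsplot_testfede.py site_readfile_list.txt 3600 60 1464000000 0.012 T2_IT_Pisa 4
# i.e. a 3600 s test with 60 s bins starting at Unix time 1464000000, an RTT
# of 0.012 s, plotted for site "T2_IT_Pisa" with a target of 4 s to read 1 MB.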
|
|
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel lib provides a way to read and write SavedModels.
This is an internal Hub utility and not part of the public API.
"""
import collections
import os
import re
from absl import logging
import tensorflow as tf
from tensorflow_hub import module_attachment_pb2
from tensorflow_hub import tf_utils
from google.protobuf import message
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
# pylint: enable=g-direct-tensorflow-import
# A collection of pairs (key: string, definition : SignatureDef) used internally
# to propagate signatures defined in a Graph to SavedModel. The collection key
# is a tuple (not a string) in order to make it invisible from user apis such
# as `get_all_collection_keys()` and manual exporting to meta_graphs.
_SIGNATURE_COLLECTION = ("__saved_model_lib_signatures",)
# A collection of ModuleAttachment protos is used internally to collect
# the (key, value) pairs passed to attach_message() calls from the module_fn.
# As above, it gets a non-string name to make it invisible within module_fn.
_ATTACHMENT_COLLECTION_INTERNAL = ("__hub_module_attachments",)
# The ModuleAttachment protos are stored in SavedModel.meta_graphs (but never
# in tf.Graphs) as CollectionDef.bytes_list under this key.
ATTACHMENT_COLLECTION_SAVED = "hub_module_attachments"
def get_variables_path(export_dir):
"""Returns the path for storing variables checkpoints."""
return os.path.join(
tf.compat.as_bytes(export_dir),
tf.compat.as_bytes(tf.compat.v1.saved_model.VARIABLES_DIRECTORY),
tf.compat.as_bytes(tf.compat.v1.saved_model.VARIABLES_FILENAME))
def _get_assets_dir(export_dir):
return os.path.join(
tf.compat.as_bytes(export_dir),
tf.compat.as_bytes(tf.compat.v1.saved_model.ASSETS_DIRECTORY))
def _get_asset_filename(export_dir, asset_filename):
assets_dir = _get_assets_dir(export_dir)
filename = os.path.join(
tf.compat.as_bytes(assets_dir),
tf.compat.as_bytes(asset_filename))
if not tf_utils.absolute_path(filename).startswith(
tf_utils.absolute_path(assets_dir)):
raise ValueError(
"Asset filename (%s) points outside assets_dir" % asset_filename)
logging.debug("Asset filename: %s", filename)
return filename
def _get_saved_model_proto_path(export_dir):
return os.path.join(
tf.compat.as_bytes(export_dir),
tf.compat.as_bytes(tf.compat.v1.saved_model.SAVED_MODEL_FILENAME_PB))
def _get_node_name_from_tensor(tensor_name):
"""tensor_name must have format node_name:output_number. Returns node_name."""
result = re.match(r"([^:]*):\d+$", tensor_name)
if not result:
raise ValueError(
"Unexpected format for tensor name. Expected node_name:output_number. "
"Got %r" % tensor_name)
return result.group(1)
def add_signature(key, inputs, outputs):
"""Adds a signature to current graph.
Args:
key: Signature key as a string.
inputs: Signature inputs as a map from string to Tensor or composite tensor
(such as SparseTensor or RaggedTensor).
outputs: Signature outputs as a map from string to Tensor or composite
tensor. (Recall that a Variable is not a Tensor, but Variable.value() is.)
Raises:
TypeError: if the arguments have the wrong types.
"""
_check_dict_maps_to_tensors_or_composite_tensors(inputs)
_check_dict_maps_to_tensors_or_composite_tensors(outputs)
input_info = {
input_name: tf.compat.v1.saved_model.utils.build_tensor_info(tensor)
for input_name, tensor in inputs.items()
}
output_info = {
output_name: tf.compat.v1.saved_model.utils.build_tensor_info(tensor)
for output_name, tensor in outputs.items()
}
signature = tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
input_info, output_info)
tf.compat.v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature))
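# Illustrative sketch (not executed; placeholder names are hypothetical):
# inside a module_fn, a signature could be declared like this.
#
#   def module_fn():
#     x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3], name="x")
#     y = 2.0 * x
#     add_signature("double", inputs={"x": x}, outputs={"y": y})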
def _check_dict_maps_to_tensors_or_composite_tensors(tensor_map):
for key, value in tensor_map.items():
if not (isinstance(value, tf.Tensor) or
tf_utils.is_composite_tensor(value)):
raise TypeError(
"Value for key '%s' should be a Tensor or CompositeTensor object, "
"found %s." % (key, type(value)))
def _export_signatures(meta_graph):
"""Exports signatures from current graph into a MetaGraphDef."""
named_signatures = tf.compat.v1.get_collection(_SIGNATURE_COLLECTION)
if not named_signatures:
raise ValueError("No signatures present. Please call hub.add_signature(...)"
"at least once in the module_fn.")
for key, signature in named_signatures:
meta_graph.signature_def[key].CopyFrom(signature)
def attach_bytes(key, the_bytes):
"""Adds a ModuleAttachment to the current graph.
Args:
key: A string with the unique key of the attachment.
the_bytes: A bytes object with the serialized attachment.
"""
tf.compat.v1.add_to_collection(
_ATTACHMENT_COLLECTION_INTERNAL,
module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))
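# Illustrative sketch (hypothetical key and payload): attachments are plain
# bytes, so arbitrary serialized protos or text can be stored.
#
#   attach_bytes("vocab-info", tf.compat.as_bytes("size=50000"))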
def _export_module_attachments(meta_graph):
"""Exports ModuleAttachments from the current tf.Graph into `meta_graph`."""
added_attachments = tf.compat.v1.get_collection(
_ATTACHMENT_COLLECTION_INTERNAL)
if not added_attachments: return # Don't touch `meta_graph`.
unique_attachments = collections.OrderedDict( # Avoid indeterminism.
(attachment.key, attachment)
for attachment in added_attachments)
meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED].bytes_list.value[:] = [
attachment.SerializeToString()
for attachment in unique_attachments.values()]
def get_attached_bytes_map(meta_graph):
"""Returns the dict of ModuleAttachments stored in `meta_graph`.
Args:
meta_graph: A MetaGraphDef, as built by SavedModelHandler.add_graph_copy()
from some graph.
Returns:
A dict, containing the `(key, bytes)` items passed to `attach_bytes()`
when the graph had been built.
Raises:
ValueError: if `meta-graph` is malformed.
"""
result = {}
if ATTACHMENT_COLLECTION_SAVED not in meta_graph.collection_def:
return result
collection_def = meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED]
if collection_def.WhichOneof("kind") != "bytes_list":
raise ValueError(
"Internal CollectionDef for attached messages has kind %s, "
"expected bytes_list" % collection_def.WhichOneof("kind"))
attachment = module_attachment_pb2.ModuleAttachment()
for value in collection_def.bytes_list.value:
attachment.ParseFromString(value)
result[attachment.key] = attachment.value # Immutable; needs no copy.
return result
def _export_tags(meta_graph, tags):
"""Exports tags into a MetaGraphDef."""
if tags is not None:
meta_graph.meta_info_def.tags.extend(tags)
def _check_asset_node_def(node_def):
"""Raises TypeError if `node_def` does not match the expectations."""
if node_def.op != "Const":
raise TypeError("Asset node must be of type constant.")
if tf.as_dtype(node_def.attr["dtype"].type) != tf.string:
raise TypeError("Asset node must be of dtype string.")
if len(node_def.attr["value"].tensor.string_val) != 1:
raise TypeError("Asset node must be a scalar.")
def _merge_assets_key_collection(saved_model_proto, path):
"""Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto.
Removes the ASSETS_KEY collection from the GraphDefs in the SavedModel and
modifies nodes with the assets filenames to point to the assets in `path`.
After this transformation, the SavedModel GraphDefs can be used without
feeding asset tensors.
Args:
saved_model_proto: SavedModel proto to be modified.
path: path where the SavedModel is being loaded from.
"""
for meta_graph in saved_model_proto.meta_graphs:
node_asset_map = {}
if tf.compat.v1.saved_model.ASSETS_KEY in meta_graph.collection_def:
assets_any_proto = meta_graph.collection_def[
tf.compat.v1.saved_model.ASSETS_KEY].any_list.value
for asset_any_proto in assets_any_proto:
asset_proto = meta_graph_pb2.AssetFileDef()
asset_any_proto.Unpack(asset_proto)
asset_filename = _get_asset_filename(path, asset_proto.filename)
node_asset_map[_get_node_name_from_tensor(
asset_proto.tensor_info.name)] = asset_filename
del meta_graph.collection_def[tf.compat.v1.saved_model.ASSETS_KEY]
for node in meta_graph.graph_def.node:
asset_filepath = node_asset_map.get(node.name)
if asset_filepath:
_check_asset_node_def(node)
node.attr["value"].tensor.string_val[0] = asset_filepath
def _make_assets_key_collection(saved_model_proto, export_path):
"""Creates an ASSETS_KEY collection in the GraphDefs in saved_model_proto.
Adds an ASSETS_KEY collection to the GraphDefs in the SavedModel and returns
a map from original asset filename to filename when exporting the SavedModel
to `export_path`.
This is roughly the inverse operation of `_merge_assets_key_collection`.
Args:
saved_model_proto: SavedModel proto to be modified.
export_path: string with path where the saved_model_proto will be exported.
Returns:
A map from original asset filename to asset filename when exporting the
SavedModel to path.
Raises:
    ValueError: on unsupported/unexpected SavedModel.
"""
asset_filenames = {}
used_asset_filenames = set()
def _make_asset_filename(original_filename):
"""Returns the asset filename to use for the file."""
if original_filename in asset_filenames:
return asset_filenames[original_filename]
basename = os.path.basename(original_filename)
suggestion = basename
index = 0
while suggestion in used_asset_filenames:
suggestion = tf.compat.as_bytes(basename) + tf.compat.as_bytes(str(index))
index += 1
asset_filenames[original_filename] = suggestion
used_asset_filenames.add(suggestion)
return suggestion
for meta_graph in saved_model_proto.meta_graphs:
collection_def = meta_graph.collection_def.get(
tf.compat.v1.GraphKeys.ASSET_FILEPATHS)
if collection_def is None:
continue
if collection_def.WhichOneof("kind") != "node_list":
raise ValueError(
"MetaGraph collection ASSET_FILEPATHS is not a list of tensors.")
for tensor in collection_def.node_list.value:
if not tensor.endswith(":0"):
raise ValueError("Unexpected tensor in ASSET_FILEPATHS collection.")
asset_nodes = set([
_get_node_name_from_tensor(tensor)
for tensor in collection_def.node_list.value
])
tensor_filename_map = {}
for node in meta_graph.graph_def.node:
if node.name in asset_nodes:
_check_asset_node_def(node)
filename = node.attr["value"].tensor.string_val[0]
tensor_filename_map[node.name + ":0"] = filename
logging.debug("Found asset node %s pointing to %s", node.name, filename)
# Clear value to avoid leaking the original path.
node.attr["value"].tensor.string_val[0] = (
tf.compat.as_bytes("SAVEDMODEL-ASSET"))
if tensor_filename_map:
assets_key_collection = meta_graph.collection_def[
tf.compat.v1.saved_model.ASSETS_KEY]
for tensor, filename in sorted(tensor_filename_map.items()):
asset_proto = meta_graph_pb2.AssetFileDef()
asset_proto.filename = _make_asset_filename(filename)
asset_proto.tensor_info.name = tensor
assets_key_collection.any_list.value.add().Pack(asset_proto)
return {
original_filename: _get_asset_filename(export_path, asset_filename)
for original_filename, asset_filename in asset_filenames.items()
}
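# For illustration: if two distinct source paths share the basename
# "vocab.txt", _make_asset_filename above yields "vocab.txt" for the first
# and "vocab.txt0" for the second, since the suggestion loop appends an
# increasing index until the name is unused.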
class SavedModelHandler(object):
"""SavedModelHandler helps using SavedModel disk format.
Note: This is a lower level interface than most users need. See SavedModel
Builder/Loader API for an higher-level API centered around exporting and
loading Sessions.
A SavedModel disk format represents a collection of Graphs. To allow these
graphs to be easy to manipulate, SavedModel extends Graphs with tags and
signatures. Additionally it packages graphs, assets and variable checkpoints
into an hermetic directory that can be moved around.
This class hides the implementation details of SavedModels, in particular
related with assets and signatures.
SavedModelHandler deals with assets by:
- Only supporting asset files as constant ops added to ASSET_FILEPATHS
collection.
- Creating a ASSETS_KEY collection only when writing meta_graphs to disk so
they are never visible to user.
- Baking the ASSETS_KEY collection in the graphs when loading from disk as
to hide that the assets point to the packaged assets.
SavedModelHandler deals with signatures by:
- Providing `add_signature` API that allows to declare signatures directly
on a graph.
- That API is supported by a collection that is not serialized, but instead
is converted into the right fields of MetaGraphDef when writing and
loading a SavedModel from disk.
"""
def __init__(self):
self._proto = saved_model_pb2.SavedModel()
def add_graph_copy(self, graph, tags=None):
"""Adds a copy of Graph with the specified set of tags."""
with graph.as_default():
      # Remove default attrs so that Modules created by a tensorflow version
      # with ops that have new attrs left at their default values can still
      # be loaded by older versions unaware of those attributes.
meta_graph = tf.compat.v1.train.export_meta_graph(
strip_default_attrs=True)
_export_tags(meta_graph, tags)
_export_signatures(meta_graph)
_export_module_attachments(meta_graph)
self._proto.meta_graphs.extend([meta_graph])
def add_meta_graph_copy(self, meta_graph):
self._proto.meta_graphs.extend([meta_graph])
def get_meta_graph_copy(self, tags=None):
"""Returns a copy of a MetaGraph with the identical set of tags."""
meta_graph = self.get_meta_graph(tags)
copy = tf.compat.v1.MetaGraphDef()
copy.CopyFrom(meta_graph)
return copy
@property
def meta_graphs(self):
return iter(self._proto.meta_graphs)
def get_tags(self):
"""Returns a list of set of tags."""
return sorted([frozenset(meta_graph.meta_info_def.tags)
for meta_graph in self.meta_graphs])
def get_attached_bytes_map(self, tags=None):
return get_attached_bytes_map(self.get_meta_graph(tags))
def export(self, path, variables_saver=None):
"""Exports to SavedModel directory.
Args:
path: path where to export the SavedModel to.
variables_saver: lambda that receives a directory path where to
export checkpoints of variables.
"""
# Operate on a copy of self._proto since it needs to be modified.
proto = saved_model_pb2.SavedModel()
proto.CopyFrom(self._proto)
assets_map = _make_assets_key_collection(proto, path)
self._save_all_assets(path, assets_map)
self._save_variables(path, variables_saver)
self._save_proto(path, proto)
def get_meta_graph(self, tags=None):
"""Returns the matching MetaGraphDef or raises KeyError."""
matches = [meta_graph
for meta_graph in self.meta_graphs
if set(meta_graph.meta_info_def.tags) == set(tags or [])]
if not matches:
raise KeyError("SavedModelHandler has no graph with tags: %r" % tags)
if len(matches) != 1:
raise KeyError(
"SavedModelHandler has multiple graphs with tags %r" % tags)
return matches[0]
def _save_all_assets(self, path, assets_map):
assets_dir = _get_assets_dir(path)
tf.compat.v1.gfile.MakeDirs(assets_dir)
for source, destination in assets_map.items():
tf.compat.v1.gfile.Copy(source, destination)
def _save_variables(self, path, variables_saver):
if variables_saver:
variables_path = get_variables_path(path)
variables_dir = os.path.dirname(variables_path)
tf.compat.v1.gfile.MakeDirs(variables_dir)
logging.debug("Variables saved in: %s", variables_path)
variables_saver(variables_path)
def _save_proto(self, path, proto):
proto_path = _get_saved_model_proto_path(path)
tf.compat.v1.gfile.MakeDirs(os.path.dirname(proto_path))
logging.debug("SavedModel saved in: %s", proto_path)
tf_utils.atomic_write_string_to_file(proto_path,
proto.SerializeToString(),
overwrite=True)
def _parse_saved_model(path):
"""Reads the savedmodel.pb file containing `SavedModel`."""
# Based on tensorflow/python/saved_model/loader.py implementation.
path_to_pb = _get_saved_model_proto_path(path)
file_content = tf.compat.v1.gfile.Open(path_to_pb, "rb").read()
saved_model = saved_model_pb2.SavedModel()
try:
saved_model.ParseFromString(file_content)
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
return saved_model
def load(path):
"""Creates a SavedModelHandler from a SavedModel in `path`."""
proto = _parse_saved_model(path)
_merge_assets_key_collection(proto, path)
handler = SavedModelHandler()
handler._proto = proto # pylint: disable=protected-access
return handler
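# Illustrative round trip (a sketch, not executed on import; "/tmp/my_module"
# is a hypothetical path):
#
#   handler = SavedModelHandler()
#   with tf.Graph().as_default() as g:
#     x = tf.compat.v1.placeholder(tf.float32, shape=[None])
#     add_signature("default", inputs={"x": x}, outputs={"y": x * x})
#     handler.add_graph_copy(g, tags=["train"])
#   handler.export("/tmp/my_module")
#   reloaded = load("/tmp/my_module")
#   print(reloaded.get_tags())  # [frozenset(["train"])]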
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Less Frequently Used (LFU) Windows Registry plugin."""
from __future__ import unicode_literals
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.parsers.winreg_plugins import lfu
from tests.parsers.winreg_plugins import test_lib
class BootExecutePluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the LFU BootExecute Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path (str): Windows Registry key path.
time_string (str): key last written date and time.
Returns:
dfwinreg.WinRegistryKey: a Windows Registry key.
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromDateTimeString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'Session Manager', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
value_data = 'autocheck autochk *\x00'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'BootExecute', data=value_data,
data_type=dfwinreg_definitions.REG_MULTI_SZ, offset=123)
registry_key.AddValue(registry_value)
value_data = '2592000'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'CriticalSectionTimeout', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=153)
registry_key.AddValue(registry_value)
value_data = '\x00'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'ExcludeFromKnownDlls', data=value_data,
data_type=dfwinreg_definitions.REG_MULTI_SZ, offset=163)
registry_key.AddValue(registry_value)
value_data = '0'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'GlobalFlag', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=173)
registry_key.AddValue(registry_value)
value_data = '0'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'HeapDeCommitFreeBlockThreshold', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=183)
registry_key.AddValue(registry_value)
value_data = '0'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'HeapDeCommitTotalFreeThreshold', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=203)
registry_key.AddValue(registry_value)
value_data = '0'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'HeapSegmentCommit', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=213)
registry_key.AddValue(registry_value)
value_data = '0'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'HeapSegmentReserve', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=223)
registry_key.AddValue(registry_value)
value_data = '2'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'NumberOfInitialSessions', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=243)
registry_key.AddValue(registry_value)
return registry_key
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = lfu.BootExecutePlugin()
key_path = (
'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\Session Manager')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
key_path = (
'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\Session Manager')
time_string = '2012-08-31 20:45:29'
registry_key = self._CreateTestKey(key_path, time_string)
plugin = lfu.BootExecutePlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-08-31 20:45:29.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:boot_execute')
expected_message = (
'[{0:s}] '
'BootExecute: autocheck autochk *').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
event = events[1]
self.CheckTimestamp(event.timestamp, '2012-08-31 20:45:29.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.data_type, 'windows:registry:key_value')
expected_message = (
'[{0:s}] '
'CriticalSectionTimeout: [REG_SZ] 2592000 '
'ExcludeFromKnownDlls: [REG_MULTI_SZ] [] '
'GlobalFlag: [REG_SZ] 0 '
'HeapDeCommitFreeBlockThreshold: [REG_SZ] 0 '
'HeapDeCommitTotalFreeThreshold: [REG_SZ] 0 '
'HeapSegmentCommit: [REG_SZ] 0 '
'HeapSegmentReserve: [REG_SZ] 0 '
'NumberOfInitialSessions: [REG_SZ] 2').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
class BootVerificationPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the LFU BootVerification Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path (str): Windows Registry key path.
time_string (str): key last written date and time.
Returns:
dfwinreg.WinRegistryKey: a Windows Registry key.
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromDateTimeString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'BootVerificationProgram', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
value_data = 'C:\\WINDOWS\\system32\\googleupdater.exe'.encode(
'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'ImagePath', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=123)
registry_key.AddValue(registry_value)
return registry_key
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = lfu.BootVerificationPlugin()
key_path = (
'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\'
'BootVerificationProgram')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
key_path = '\\ControlSet001\\Control\\BootVerificationProgram'
time_string = '2012-08-31 20:45:29'
registry_key = self._CreateTestKey(key_path, time_string)
plugin = lfu.BootVerificationPlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 1)
events = list(storage_writer.GetEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-08-31 20:45:29.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:boot_verification')
expected_message = (
'[{0:s}] '
'ImagePath: C:\\WINDOWS\\system32\\googleupdater.exe').format(
key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
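# The tests can also be run via the test runner; assuming the usual plaso
# repository layout, something like:
#   python -m unittest tests.parsers.winreg_plugins.lfu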
|
|
import os
import csv
from re import escape
from fractions import Fraction
from collections import defaultdict
import numpy as np
from scipy.sparse.linalg import svds
from sklearn.metrics.pairwise import manhattan_distances as manhat_dist
from sklearn.metrics.pairwise import euclidean_distances as euclid_dist
from sklearn.metrics.pairwise import chi2_kernel as chi2_dist
##############
# ENCODING #
##############
def notes2unichr(x):
notes_dict = {'C': 0, 'C#': 1, 'D': 2, 'D#': 3, 'E': 4, 'F': 5, 'F#': 6,
'G': 7, 'G#': 8, 'A': 9, 'Bb': 10, 'B': 11}
bin_array = [0]*12
for i in x:
bin_array[notes_dict[i]] = 1
return unichr(binaryString2int(array2str(bin_array)))
def chroma_str2notes(x):
notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'Bb', 'B']
return [notes[i] for i in xrange(12) if x[i] == '1']
def unichr2notes(x, as_string=True):
if as_string:
return ' '.join(chroma_str2notes(unichr2chroma_str(x)))
return chroma_str2notes(unichr2chroma_str(x))
def unichr2chroma_str(x):
return int2chroma_str(ord(x))
def binaryString2int(x):
return int(x, 2)
def int2chroma_str(x):
"""Converts an int to a binary Chroma encoding
Parameters
----------
x : int
Returns
-------
chroma_str : str
String representing a binary chroma
"""
chroma_str = "{:b}".format(x)
return "0"*(12-len(chroma_str))+chroma_str
def chroma_str2chroma(x):
"""Converts an int to a binary Chroma encoding
Parameters
----------
x : list of strings
List of chroma strings, e.g. '010101010101'
Returns
-------
chroma : np.array
Array representing a binary chroma
"""
return np.array([bool(int(i)) for i in x])
def array2str(x):
return ''.join(map(str, x))
def encodeChromagram(chromagram):
return map(unichr, map(binaryString2int, map(array2str, chromagram)))
def decodeChromagram(chromagram, as_str=False):
"""Decodes an list of unichr encoded chromagram to binary chromagram"
Parameters
----------
chromagram: unicode list
List of chromagrams encoded as unichr using the encodeChromagram method
is_unichar : bool
Is chromagram encoded as unichar
as_str : boolean
Decode into list of strings instead of traditional chromagrams
Returns
-------
chroma : str list
List of string binary encoding of chromagram
"""
chroma = [int2chroma_str(j) for j in [ord(i) for i in chromagram]]
if as_str:
return chroma
return np.array([chroma_str2chroma(i) for i in chroma])
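# Illustrative round trip (Python 2, since the module relies on unichr/ord):
# a C major triad sets the C, E and G bits, i.e. '100010010000' == 2192.
#
#   c = notes2unichr(['C', 'E', 'G'])          # unichr(2192)
#   print unichr2notes(c)                      # 'C E G'
#   print decodeChromagram([c], as_str=True)   # ['100010010000']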
def buildHistogram(seq, escape_chars=''):
"""Given some sequence, escape special characters a build a histogram in the
form of a dictionary
Parameters
----------
seq : list
List on which to compute histogram
escape_chars: list or string
Characters to be escaped with re.escape
Returns
-------
histogram : dict
Dictionary where keys are unique values in sequence, escaped if req. by
user, and values are key counts.
"""
def incrementDict(a_dict, key):
a_dict[key] += 1
histogram = defaultdict(Zero)
[incrementDict(histogram, char) if char not in escape_chars else
incrementDict(histogram, escape(char)) for char in seq]
return histogram
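# For example (hypothetical input): buildHistogram('a++b', escape_chars='+')
# yields {'a': 1.0, '\\+': 2.0, 'b': 1.0}; counts are floats because the
# defaultdict factory Zero() returns 0.0.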
#######################
# UTILITY FUNCTIONS #
#######################
def getFractions(vals):
return np.array([Fraction(val) for val in vals])
def round2nearest(vals, fraction):
return np.round(np.array(vals, dtype=float) / fraction) * fraction
def getImmediateSubdirectories(folder_path):
return [name for name in os.listdir(folder_path)
if os.path.isdir(os.path.join(folder_path, name))]
###########
# MUSIC #
###########
def cropEdges(seq, condition=lambda x: np.sum(x) == 0):
    """Crops leading and trailing frames that satisfy `condition`."""
    start_i = 0
    end_i = len(seq) - 1
    for i in xrange(0, len(seq)):
        if condition(seq[i]):
            start_i = i + 1
        else:
            break
    for i in xrange(len(seq) - 1, start_i, -1):
        if condition(seq[i]):
            end_i = i
        else:
            break
    return seq[start_i:end_i]
def getDistanceFromKey(key_num, key=0):
if key_num > 11:
return key - ((key_num + 3) % 12)
return key - key_num
def segmentSequence(seq, window=4, dist_func=euclid_dist, thresh=2.5):
    segment_ids = []
    if len(seq) > window:
        prv_txtr = seq[0:window].ravel()
        # compare each complete window to the previous one; a trailing
        # partial window is ignored
        for i in range(window, (len(seq) / window) * window, window):
            cur_txtr = seq[i:i + window].ravel()
            # note: squared=True assumes a euclidean-style dist_func
            if dist_func(prv_txtr, cur_txtr, squared=True) > thresh:
                segment_ids.append(i)
            prv_txtr = cur_txtr
    return segment_ids
def getPhraseStarts(vals, window=4):
"""Naive implementation of phrase segmentation
Parameters
----------
vals : numpy array
Rows as features cols as timesteps
window : int
Window size in timesteps
"""
results = np.zeros(vals.shape[1], dtype=bool)
results[0] = True
    for i in xrange(1, vals.shape[1] - window):
        results[i] = abs(vals[:, i].mean() - vals[:, i:i + window].mean()) > \
            vals[:, i:i + window].std() and not results[i - 1]
return results
def reduceDimensionality(data, n_singv=0, threshold=0.9):
    if n_singv > 0:
        lsv, sv, rsv = svds(data, n_singv, which='LM')
    else:
        lsv, sv, rsv = svds(data, data.shape[1] - 1, which='LM')
        # find the number of singular values that explain `threshold` of the
        # variance
        n_singv = 1
        while np.sum(sv[-n_singv:]) / np.sum(sv) < threshold:
            n_singv += 1
    # compute reduced data and data projected onto principal components space
    data_redu = np.dot(data, rsv.T)
    data_proj = np.dot(lsv[:, -n_singv:],
                       np.dot(np.diag(sv[-n_singv:]), rsv[-n_singv:, :]))
    return data_redu, data_proj
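# Minimal sketch (random data, hypothetical shapes): reduce a 100x20 matrix,
# letting the threshold pick the number of singular values.
#
#   data = np.random.rand(100, 20)
#   data_redu, data_proj = reduceDimensionality(data, threshold=0.9)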
##########
# MISC #
##########
def Zero():
return 0.0
def writeCSV(filename, items, delimiter=','):
with open(filename, 'wb') as f:
wr = csv.writer(f, delimiter=delimiter, quoting=csv.QUOTE_ALL)
for item in items:
wr.writerow([item])
def openCSV(filename):
with open(filename, 'rb') as f:
reader = csv.reader(f)
return list(reader)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class NetworkSecurityGroupsOperations(object):
"""NetworkSecurityGroupsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-08-01"
self.config = config
def _delete_initial(
self, resource_group_name, network_security_group_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_security_group_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security
group.
:type network_security_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
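    # Illustrative call (hypothetical client and names): delete() returns an
    # AzureOperationPoller, so the caller decides whether to block.
    #
    #   poller = network_client.network_security_groups.delete('my-rg', 'my-nsg')
    #   poller.wait()  # block until the long-running delete completes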
def get(
self, resource_group_name, network_security_group_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security
group.
:type network_security_group_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkSecurityGroup or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_08_01.models.NetworkSecurityGroup or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkSecurityGroup', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _create_or_update_initial(
self, resource_group_name, network_security_group_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkSecurityGroup', response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkSecurityGroup', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, network_security_group_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a network security group in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security
group.
:type network_security_group_name: str
:param parameters: Parameters supplied to the create or update network
security group operation.
:type parameters:
~azure.mgmt.network.v2017_08_01.models.NetworkSecurityGroup
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
NetworkSecurityGroup or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.NetworkSecurityGroup]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('NetworkSecurityGroup', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
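    # Illustrative call (hypothetical names; NetworkSecurityGroup comes from
    # the generated models package):
    #
    #   params = models.NetworkSecurityGroup(location='westus')
    #   poller = network_client.network_security_groups.create_or_update(
    #       'my-rg', 'my-nsg', params)
    #   nsg = poller.result()  # the deserialized NetworkSecurityGroup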
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all network security groups in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkSecurityGroup
:rtype:
~azure.mgmt.network.v2017_08_01.models.NetworkSecurityGroupPaged[~azure.mgmt.network.v2017_08_01.models.NetworkSecurityGroup]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkSecurityGroup
:rtype:
~azure.mgmt.network.v2017_08_01.models.NetworkSecurityGroupPaged[~azure.mgmt.network.v2017_08_01.models.NetworkSecurityGroup]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
|
"""The GeoNet NZ Quakes integration."""
import asyncio
from datetime import timedelta
import logging
from aio_geojson_geonetnz_quakes import GeonetnzQuakesFeedManager
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
LENGTH_MILES,
)
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.unit_system import METRIC_SYSTEM
from .config_flow import configured_instances
from .const import (
CONF_MINIMUM_MAGNITUDE,
CONF_MMI,
DEFAULT_FILTER_TIME_INTERVAL,
DEFAULT_MINIMUM_MAGNITUDE,
DEFAULT_MMI,
DEFAULT_RADIUS,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
FEED,
PLATFORMS,
SIGNAL_DELETE_ENTITY,
SIGNAL_NEW_GEOLOCATION,
SIGNAL_STATUS,
SIGNAL_UPDATE_ENTITY,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_MMI, default=DEFAULT_MMI): vol.All(
vol.Coerce(int), vol.Range(min=-1, max=8)
),
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): vol.Coerce(float),
vol.Optional(
CONF_MINIMUM_MAGNITUDE, default=DEFAULT_MINIMUM_MAGNITUDE
): vol.All(vol.Coerce(float), vol.Range(min=0)),
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the GeoNet NZ Quakes component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
latitude = conf.get(CONF_LATITUDE, hass.config.latitude)
longitude = conf.get(CONF_LONGITUDE, hass.config.longitude)
mmi = conf[CONF_MMI]
scan_interval = conf[CONF_SCAN_INTERVAL]
identifier = f"{latitude}, {longitude}"
if identifier in configured_instances(hass):
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_LATITUDE: latitude,
CONF_LONGITUDE: longitude,
CONF_RADIUS: conf[CONF_RADIUS],
CONF_MINIMUM_MAGNITUDE: conf[CONF_MINIMUM_MAGNITUDE],
CONF_MMI: mmi,
CONF_SCAN_INTERVAL: scan_interval,
},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up the GeoNet NZ Quakes component as config entry."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
if FEED not in hass.data[DOMAIN]:
hass.data[DOMAIN][FEED] = {}
radius = config_entry.data[CONF_RADIUS]
unit_system = config_entry.data[CONF_UNIT_SYSTEM]
if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
radius = METRIC_SYSTEM.length(radius, LENGTH_MILES)
# Create feed entity manager for all platforms.
manager = GeonetnzQuakesFeedEntityManager(hass, config_entry, radius, unit_system)
hass.data[DOMAIN][FEED][config_entry.entry_id] = manager
_LOGGER.debug("Feed entity manager added for %s", config_entry.entry_id)
await manager.async_init()
return True
async def async_unload_entry(hass, config_entry):
"""Unload an GeoNet NZ Quakes component config entry."""
manager = hass.data[DOMAIN][FEED].pop(config_entry.entry_id)
await manager.async_stop()
await asyncio.wait(
[
hass.config_entries.async_forward_entry_unload(config_entry, domain)
for domain in PLATFORMS
]
)
return True
class GeonetnzQuakesFeedEntityManager:
"""Feed Entity Manager for GeoNet NZ Quakes feed."""
def __init__(self, hass, config_entry, radius_in_km, unit_system):
"""Initialize the Feed Entity Manager."""
self._hass = hass
self._config_entry = config_entry
coordinates = (
config_entry.data[CONF_LATITUDE],
config_entry.data[CONF_LONGITUDE],
)
websession = aiohttp_client.async_get_clientsession(hass)
self._feed_manager = GeonetnzQuakesFeedManager(
websession,
self._generate_entity,
self._update_entity,
self._remove_entity,
coordinates,
mmi=config_entry.data[CONF_MMI],
filter_radius=radius_in_km,
filter_minimum_magnitude=config_entry.data[CONF_MINIMUM_MAGNITUDE],
filter_time=DEFAULT_FILTER_TIME_INTERVAL,
status_callback=self._status_update,
)
self._config_entry_id = config_entry.entry_id
self._scan_interval = timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL])
self._unit_system = unit_system
self._track_time_remove_callback = None
self._status_info = None
self.listeners = []
async def async_init(self):
"""Schedule initial and regular updates based on configured time interval."""
for domain in PLATFORMS:
self._hass.async_create_task(
self._hass.config_entries.async_forward_entry_setup(
self._config_entry, domain
)
)
async def update(event_time):
"""Update."""
await self.async_update()
# Trigger updates at regular intervals.
self._track_time_remove_callback = async_track_time_interval(
self._hass, update, self._scan_interval
)
_LOGGER.debug("Feed entity manager initialized")
async def async_update(self):
"""Refresh data."""
await self._feed_manager.update()
_LOGGER.debug("Feed entity manager updated")
async def async_stop(self):
"""Stop this feed entity manager from refreshing."""
for unsub_dispatcher in self.listeners:
unsub_dispatcher()
self.listeners = []
if self._track_time_remove_callback:
self._track_time_remove_callback()
_LOGGER.debug("Feed entity manager stopped")
@callback
def async_event_new_entity(self):
"""Return manager specific event to signal new entity."""
return SIGNAL_NEW_GEOLOCATION.format(self._config_entry_id)
def get_entry(self, external_id):
"""Get feed entry by external id."""
return self._feed_manager.feed_entries.get(external_id)
def status_info(self):
"""Return latest status update info received."""
return self._status_info
async def _generate_entity(self, external_id):
"""Generate new entity."""
async_dispatcher_send(
self._hass,
self.async_event_new_entity(),
self,
external_id,
self._unit_system,
)
async def _update_entity(self, external_id):
"""Update entity."""
async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY.format(external_id))
async def _remove_entity(self, external_id):
"""Remove entity."""
async_dispatcher_send(self._hass, SIGNAL_DELETE_ENTITY.format(external_id))
async def _status_update(self, status_info):
"""Propagate status update."""
_LOGGER.debug("Status update received: %s", status_info)
self._status_info = status_info
async_dispatcher_send(self._hass, SIGNAL_STATUS.format(self._config_entry_id))
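# A minimal sketch of how a platform entity could subscribe to this manager's
# dispatcher signals (the entity method and ``_external_id`` attribute are
# illustrative assumptions, not part of this file):
#
#     from homeassistant.helpers.dispatcher import async_dispatcher_connect
#
#     async def async_added_to_hass(self):
#         self.async_on_remove(
#             async_dispatcher_connect(
#                 self.hass,
#                 SIGNAL_UPDATE_ENTITY.format(self._external_id),
#                 self._update_callback,
#             )
#         )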
|
|
# -*- coding: utf-8 -*-
import time
import logging
import numpy as np
from load import load
from collections import Counter
log = logging.getLogger(__name__)
def distance(f, t):
return int(np.round(np.sqrt(np.power(np.abs(f.pos[0] - t.pos[0]), 2) + np.power(np.abs(f.pos[1] - t.pos[1]), 2))))
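# Worked example: for f.pos == (0, 0) and t.pos == (3, 4) this evaluates to
# round(sqrt(3**2 + 4**2)) == 5, i.e. the Euclidean distance rounded to the
# nearest integer.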
gl_warehouses = {}
gl_drones = {}
gl_products = {}
gl_orders = {}
class Warehouse(object):
def __init__(self, args, id, pos, products):
self.args = args
self.id = id
self.pos = pos
self._stock = {}
for i in range(args['nr_product_T']):
self._stock[i] = products[i]
def take(self, p_T, count):
print("Warehouse %d: Taking %d of product %d." % (self.id, p_T, count))
assert(self._stock[p_T] >= count)
self._stock[p_T] -= count
def can_fulfil(self, order):
count = Counter()
for p_T, c in order._products.items():
if self._stock[p_T] < c:
count['not_available'] += 1
else:
count['available'] += 1
count['count'] += 1
#print("Warehouse %d, Order %d: %d/%d products available" % (self.id, order._id, count['available'], count['count']))
return count
def stock(self, p_T):
return self._stock[p_T]
class Order(object):
def __init__(self, args, id, to, products):
self._id = id
self.pos = to
self.args = args
self._products = {}
for i in range(args['nr_product_T']):
s = sum([1 for p in products if p == i])
if s > 0:
self._products[i] = s
self.served = False
def weight(self):
return sum([v * self.args['product_T'][k] for k, v in self._products.items()])
class Drone(object):
def __init__(self, args, id, pos, capacity):
self.args = args
self._id = id
self._capacity = capacity
self._current_load = 0
self._load = Counter()
self.pos = pos
self.free_at = 0
def free(self, time):
if self.free_at <= time:
return True
else:
return False
def space(self, p_T):
return int(np.floor((self._capacity - self._current_load) / self.args['product_T'][p_T]))
def put(self, p_T, count):
print("Drone %d: Loading %d times product %d" % (self._id, count, p_T))
assert(count > 0)
self._load[p_T] += count
self._current_load += self.args['product_T'][p_T] * count
assert(self._current_load <= self._capacity)
print("Drone %d: Utilization %d/%d" % (self._id, self._current_load, self._capacity))
def pull(self, p_T, count):
print("Drone %d: Unloading %d times product %d" % (self._id, count, p_T))
assert(count > 0)
self._load[p_T] -= count
self._current_load -= self.args['product_T'][p_T] * count
assert(self._current_load >= 0)
def serve(self, order, warehouses):
order.served = True
takes_t = 0
for w in warehouses:
takes = {}
for p_T, count in order._products.items():
print("Order %d: We need %d times %d" % (order._id, count, p_T))
print("Counters: Warehouse (%d) / Order (%d) / space left (%d)" % (w.stock(p_T), count, self.space(p_T)))
takes[p_T] = min(w.stock(p_T), count, self.space(p_T))
takes_t += self.load(w, p_T, takes[p_T])
for p_T, count in takes.items():
takes_t += self.deliver(order, p_T, count)
return takes_t
def serve_multiple_trips(self, order, warehouses):
order.served = True
takes_t = 0
for w in warehouses:
while sum(order._products.values()) > 0:
    takes = {}
    for p_T, count in order._products.items():
        print("Order %d: We need %d times %d" % (order._id, count, p_T))
        print("Counters: Warehouse (%d) / Order (%d) / space left (%d)" % (w.stock(p_T), count, self.space(p_T)))
        c = min(w.stock(p_T), count, self.space(p_T))
        if c == 0:
            continue
        takes[p_T] = c
        takes_t += self.load(w, p_T, takes[p_T])
    if not takes:
        # Nothing more can be taken from this warehouse; move on instead
        # of looping forever on an unfulfillable remainder.
        break
    for p_T, count in takes.items():
        takes_t += self.deliver(order, p_T, count)
return takes_t
def load(self, warehouse, p_T, count):
warehouse._stock[p_T] -= count
self.put(p_T, count)
self.args['commands'].append("%d L %d %d %d" % (self._id, warehouse.id, p_T, count))
takes_t = distance(warehouse, self)
self.pos = warehouse.pos
return takes_t + 1
def unload(self, warehouse, p_T, count):
print("Drone %d unloading at warehouse %d product type %d %d times." % (self._id, warehouse.id, p_T, count))
self.args['commands'].append("%d U %d %d %d" % (self._id, warehouse.id, p_T, count))
self.pos = warehouse.pos
return 1
def deliver(self, order, p_T, count):
order._products[p_T] -= count
self.pull(p_T, count)
fa = (self._id, order._id, p_T, count)
print("Drone %d delivering for order %d product type %d %d of them." % fa)
self.args['commands'].append("%d D %d %d %d" % fa)
takes_t = distance(order, self)
self.pos = order.pos
return takes_t + 1
def run(args):
global gl_warehouses, gl_drones, gl_products, gl_orders
gl_warehouses = {}
gl_drones = {}
gl_products = {}
gl_orders = {}
start = time.time()
args['commands'] = []
args['gridsize'] = (args['cols'], args['rows'])
#args['time_limit']
args['product_T'] = args['product_types']
args['nr_product_T'] = len(args['product_types'])
#args['product_types']
# Drone
for idx, d in enumerate(args['drones']):
gl_drones[idx] = Drone(args, idx, d['coords'], args['max_payload'])
# Warehouses
for idx, d in enumerate(args['warehouses']):
gl_warehouses[idx] = Warehouse(args, idx, d['coords'], d['products'])
# Orders
for idx, d in enumerate(args['orders']):
gl_orders[idx] = Order(args, idx, d['coords'], d['products'])
print("### Loop ###")
order_c, score = loop(args)
print("### Loop end (%d orders served, score %d)###" % (order_c, score))
# Save to file
solutionf = open("solution_%s.txt" % args['scenario'], 'w')
print("%d" % len(args['commands']), file=solutionf)
for cmd in args['commands']:
print(cmd, file=solutionf)
duration = time.time() - start
log.info("Simulation took %.2fs." % duration)
return score
def loop(args):
easy_orders = []
for order in gl_orders.values():
if order.weight() <= args['max_payload']:
for w in gl_warehouses.values():
if w.can_fulfil(order)['not_available'] == 0:
easy_orders.append(order)
break
print("Number of easy orders: %d" % len(easy_orders))
score = 0
orders_c = 0
for NOW in range(args['time_limit']):
# gather all free drones
free = [d for d in gl_drones.values() if d.free(NOW)]
# Go through all non served orders
non_served = [tmp for tmp in gl_orders.values() if not tmp.served]
c = Counter()
for order in non_served:
tmp = [1 for _, w in gl_warehouses.items() if w.can_fulfil(order)['not_available'] == 0]
if len(tmp) > 0:
c['at_stock_at_single_w'] += 1
if order.weight() <= args['max_payload']:
c['order_fits_payload'] += 1
c['counter'] += 1
print("%d/%d orders can be fulfilled by only one warehouse trip." % (c['at_stock_at_single_w'], c['counter']))
print("%d of those orders can be completed by one drone trip." % c['order_fits_payload'])
print("%d: %d/%d drones free right now." % (NOW, len(free), len(gl_drones)))
while len(free) > 0 and len(easy_orders) > 0:
drone = free.pop()
order = easy_orders.pop()
for _, w in gl_warehouses.items():
if w.can_fulfil(order)['not_available'] == 0:
takes_t = drone.serve(order, [w])
drone.free_at = NOW + takes_t
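# Hash Code-style scoring as implemented here: an order completed at time t
# is worth ceil((T - t) / T * 100) points for time limit T, so earlier
# completions earn more.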
score += np.ceil((args['time_limit'] - drone.free_at)/args['time_limit']*100)
orders_c += 1
break
if len(easy_orders) == 0:
break
print("END GAME MODE!")
easy_orders = []
for order in [o for o in gl_orders.values() if not o.served]:
for _, w in gl_warehouses.items():
if w.can_fulfil(order)['not_available'] == 0:
easy_orders.append(order)
break
for NOW in range(NOW, args['time_limit']):
free = [d for d in gl_drones.values() if d.free(NOW)]
while len(free) > 0 and len(easy_orders) > 0:
drone = free.pop()
order = easy_orders.pop()
for _, w in gl_warehouses.items():
if w.can_fulfil(order)['not_available'] == 0:
print("Multiple trips")
takes_t = drone.serve_multiple_trips(order, [w])
drone.free_at = NOW + takes_t
score += np.ceil((args['time_limit'] - drone.free_at)/args['time_limit']*100)
orders_c += 1
break
        if len(easy_orders) == 0:
            return orders_c, score
    # The time limit ran out before the remaining easy orders were served.
    return orders_c, score
if __name__ == "__main__":
scores = {}
for i in ['redundancy.in', 'busy_day.in', 'mother_of_all_warehouses.in']:
sim = load(i)
scores[i] = run(sim)
print("--------------------")
print("Scores:")
for i, score in scores.items():
print("%s: %d" % (i, score))
print("--------------------")
print("Total score: %d" % sum(scores.values()))
|
|
"""
:mod:`dic.dic_io` provides IO operations for DIC and associated files.
This includes loading (and possibly modifying) DIC data from ``.MAT`` files and MTS data from ``.CSV`` files.
In addition, the ability to find and sort image (or general) filenames is provided.
"""
from __future__ import absolute_import, division, print_function
import itertools
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import scipy.io as spio
import sys
from tqdm import tqdm
import warnings
__all__ = ["load_dic_data", "load_csv_data", "get_filenames", "get_image_filenames", "update_dic_data"]
def get_filenames(directory, extension, sort=True, prepend_directory=False):
"""
Retrieves a list of file names in the specified directory with the given extension.
Parameters
----------
directory : str
Directory to search.
extension : str
Extension to search for (including the period, if applicable).
sort : bool, optional
Whether to sort the files. Default = ``True``.
prepend_directory : bool, optional
Whether to prepend the directory to the filenames. Default = ``False``.
Returns
-------
List[str]
List of file names that match the given extension
"""
output = []
for f in os.listdir(directory):
fbase, fext = os.path.splitext(f)
if fext == extension:
output.append(f)
if sort:
output.sort()
if prepend_directory:
output = [os.path.join(directory, f) for f in output]
return output
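# A minimal usage sketch (the directory name and its contents are
# illustrative assumptions):
#
#     mat_files = get_filenames("./dic_results", ".mat", prepend_directory=True)
#     # -> ["./dic_results/test_0000.mat", "./dic_results/test_0001.mat", ...]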
def get_image_filenames(directory, extension=".tif", prepend_directory=False):
"""
Sorts and retrieves the camera images in the specified directory. This function assumes that the left camera images
end in ``_0.ext``, where ``.ext`` is the provided extension. Similarly, the right camera images are assumed to
end in ``_1.ext``.
Parameters
----------
directory : str
Directory to search.
extension : str, optional
Image extension. Default = ``".tif"``.
prepend_directory : bool, optional
Whether to prepend the directory to the filenames. Default = ``False``.
Returns
-------
(left_camera_filenames, right_camera_filenames) : (List[str], List[str])
List of filenames belonging to each camera.
"""
filenames = get_filenames(directory, extension, prepend_directory=prepend_directory)
left_camera_filenames = []
right_camera_filenames = []
left_camera_required_text = "_0{}".format(extension)
right_camera_required_text = "_1{}".format(extension)
for f in filenames:
if f[-len(left_camera_required_text):] == left_camera_required_text:
left_camera_filenames.append(f)
elif f[-len(right_camera_required_text):] == right_camera_required_text:
right_camera_filenames.append(f)
else:
warnings.warn("Unable to categorize file {} as belonging to the left or right camera.".format(f))
return left_camera_filenames, right_camera_filenames
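# A minimal usage sketch (the directory and filenames are illustrative
# assumptions):
#
#     left, right = get_image_filenames("./images", extension=".tif")
#     # left  -> ["frame_0000_0.tif", "frame_0001_0.tif", ...]
#     # right -> ["frame_0000_1.tif", "frame_0001_1.tif", ...]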
def _mask_bad_data(dic_data, mask_key="sigma", bad_value=-1.0):
"""
Masks (i.e. removes) the uncorrelated data for each key in the dictionary. The positions of bad values are
determined by the entries of ``dic_data[mask_key]`` that are equal to ``bad_value``. The same mask is
applied to all keys in the dictionary that reference correlated data. The only keys not updated are ``x`` and ``y``
--those correspond to the pixel positions and are not dependent on whether the correlation was successful--
as well as double underscore keys, e.g. ``__key__``. This assumes that all correlated values in the
dictionary have the same shape. The dictionary is updated in-place. The keys remain the same, but the values are
masked after the function call.
Parameters
----------
dic_data : dict
Dictionary of ``{key: value}`` pairs.
mask_key : str
Key in ``dic_data`` to search for ``bad_value``s
bad_value : float
Value that corresponds to uncorrelated data for the specified ``mask_key``.
"""
bad_mask = np.isclose(dic_data[mask_key], bad_value)
unmasked_keys = ["x", "y"]
for key, value in iter(dic_data.items()):
if key[:2] != '__' and key not in unmasked_keys:
if key == mask_key:
fill_value = bad_value
else:
fill_value = None
dic_data[key] = np.ma.masked_array(value, mask=bad_mask, fill_value=fill_value)
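# Illustrative sketch of the masking behaviour on a toy dataset (all key
# names other than "sigma" are assumptions):
#
#     data = {"sigma": np.array([[0.1, -1.0]]),
#             "exx": np.array([[1.0, 2.0]]),
#             "x": np.array([[0, 1]])}
#     _mask_bad_data(data)
#     # data["exx"] is now masked where sigma == -1.0; "x" is left untouched.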
def _unmask_bad_data(dic_data):
"""
Unmasks the data for each key in the dictionary by replacing all ``numpy.MaskedArray``s with their unmasked data.
The keys remain the same, but all ``numpy.MaskedArray`` instances are converted to ``numpy.ndarray``.
Parameters
----------
dic_data : dict
Dictionary of ``{key: value}`` pairs.
"""
for key, value in iter(dic_data.items()):
if isinstance(value, np.ma.MaskedArray):
dic_data[key] = np.ma.filled(value)
def _store_min_pixel_values(dic_data):
"""
Stores the minimum ``x`` and ``y`` pixel values in the ``x_min`` and ``y_min`` keys, respectively.
These values are used every time a point is mapped from pixel space to the corresponding row and column indices
in the DIC data. Caching the values significantly reduces the conversion time. To ensure that the minimum values
are valid, the ``x`` and ``y`` arrays are set to read-only when this function is called.
Parameters
----------
dic_data : dict
Dictionary of ``{key: value}`` pairs.
"""
for key in ["x", "y"]:
try:
min_val = dic_data[key].min()
dic_data["{:s}_min".format(key)] = min_val
dic_data[key].setflags(write=False)
except KeyError:
pass
def _make_3D(dic_data):
"""
Converts 2D DIC data to 3D by adding position data for ``z`` and displacement data for ``w``
if either are not already present. If missing, the values for each key are set to zero.
If data is provided in millimeters (i.e. ``X``, ``Y``, ``U``, ``V`` keys are present) ``Z``
and ``W`` are also added.
Parameters
----------
dic_data : dict
Dictionary of ``{key: value}`` pairs.
"""
# add keys Z and W only if data in mm is provided
if "X" in dic_data:
threed_keys = ("z", "w", "Z", "W")
else:
threed_keys = ("z", "w")
for key in threed_keys:
if key not in dic_data:
arr = np.zeros_like(dic_data["sigma"].data)
dic_data[key] = np.ma.masked_array(arr, mask=dic_data["sigma"].mask)
def load_dic_data(filename, variable_names=None):
"""
Loads the DIC data specified by the given filename. Uncorrelated regions of the dataset are masked (i.e. removed)
before the data is returned. Prior to loading the data must be exported into the Matlab (``.mat``) file format.
Parameters
----------
filename : str
Name of the DIC data file to load.
variable_names : None or sequence
If ``None`` (the default) - read all variables in file. Otherwise variable_names should be a sequence of
strings, giving names of the matlab variables to read from the file. The reader will skip any variable
with a name not in this sequence, possibly saving some read processing.
Returns
-------
dict
A dictionary of ``{key: value}`` pairs where each key is the variable name,
e.g. ``X``, ``U``, ``Z``, etc., and the value is a 2D numpy array containing the
exported DIC results.
"""
dic_data = spio.loadmat(filename, variable_names=variable_names)
_mask_bad_data(dic_data)
_make_3D(dic_data)
_store_min_pixel_values(dic_data)
return dic_data
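# A minimal usage sketch (the filename and variable names are illustrative
# assumptions):
#
#     dic_data = load_dic_data("test_0001.mat")
#     u = dic_data["U"]          # masked array; uncorrelated points removed
#     x_min = dic_data["x_min"]  # cached by _store_min_pixel_values()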
def _get_csv_header_names(filename):
"""
Returns the column headers for the given filename.
Parameters
----------
filename : str
Name of the ``.csv`` file to search.
Returns
-------
List[str] or None
List of header names, or ``None`` if no header was found.
Raises
------
RuntimeError
If the first entry of the first row is not 0 or Count.
"""
with open(filename, "r") as csvfile:
first_line = csvfile.readline()
names = first_line.strip("\n").split(",")
if names[0] == '0':
return None
elif names[0] == "Count":
return names
else:
raise RuntimeError("Could not determine header.")
def load_csv_data(filename, column, scale=None):
"""
Loads the specified column of the csv data from the given filename. Values are multiplied by ``scale`` before the
data is returned.
Parameters
----------
filename : str
Name of the ``.csv`` file to load.
column : int
Column index to load from the file.
scale : float, optional
Value to scale the data by before returning. For example, when loading MTS data the scale variable
can be used to convert the native output (Volts) to force (N) by providing a ``scale`` equal to
the Newtons-per-Volt calibration factor. Default is ``None``.
Returns
-------
``numpy.ndarray``
1D array of data values.
"""
header_names = _get_csv_header_names(filename)
if header_names is not None:
header_row = 0
else:
header_row = None
csv_dataframe = pd.read_csv(filename, delimiter=",", header=header_row, names=header_names)
csv_values = csv_dataframe.iloc[:, column].values
if scale is not None:
csv_values *= scale
return csv_values
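# A minimal usage sketch: read column 1 of an MTS log and convert the native
# Volts to Newtons (the filename and the 500 N/V factor are illustrative
# assumptions):
#
#     force = load_csv_data("mts_log.csv", column=1, scale=500.0)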
def _update_dic_data_worker(worker_args):
"""
Processes a single DIC file and saves the output.
Parameters
----------
worker_args : (str, callable, tuple, str, str, bool)
Arguments needed to process a single DIC file,
namely ``(dic_filename, function, args, input_directory, output_directory, compress)``
"""
dic_filename, function, args, input_directory, output_directory, compress = worker_args
dic_data = load_dic_data(os.path.join(input_directory, dic_filename))
if function is not None:
function(dic_data, *args)
_unmask_bad_data(dic_data)
output_filename = os.path.join(output_directory, dic_filename)
spio.savemat(output_filename, dic_data, do_compression=compress)
def update_dic_data(input_directory, output_directory, function=None, args=(), compress=False, processes=None):
"""
Calls ``function`` on each DIC file and saves the new version into the output directory.
Parameters
----------
input_directory : str
    Path to the input ``.mat`` files.
output_directory : str
    Directory in which to save the updated files.
function : callable, optional
    Function that accepts a ``dict`` and modifies the object in place. Default is ``None``, i.e. no function
    is called.
args : tuple, optional
    Extra arguments passed to function, i.e. ``f(dic_data, *args)``.
compress : bool, optional
Whether to compress the DIC files when saving to the output directory.
processes : int, optional
The number of processes to use when converting the files. Default is ``None``.
If ``None`` is provided then the number of processes will be set to the value
returned by ``multiprocessing.cpu_count()``.
"""
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if not isinstance(args, tuple):
args = (args,)
dic_filenames = get_filenames(input_directory, ".mat")
num_dic_filenames = len(dic_filenames)
worker_args = zip(
dic_filenames,
itertools.repeat(function, num_dic_filenames),
itertools.repeat(args, num_dic_filenames),
itertools.repeat(input_directory, num_dic_filenames),
itertools.repeat(output_directory, num_dic_filenames),
itertools.repeat(compress, num_dic_filenames)
)
if processes is None:
processes = mp.cpu_count()
pool = mp.Pool(processes=processes)
for _ in tqdm(pool.imap_unordered(_update_dic_data_worker, worker_args),
total=num_dic_filenames, file=sys.stdout, desc="Processing DIC files"):
pass
pool.close()
pool.join()
print("DIC files updated.")
|
|
import os
import socket
import time
from OpenSSL import SSL
from netlib.exceptions import HttpReadDisconnect, HttpException
from netlib.tcp import Address
import netlib.tutils
from netlib import tcp, http, socks
from netlib.certutils import SSLCert
from netlib.http import authentication, CONTENT_MISSING, http1
from netlib.tutils import raises
from libpathod import pathoc, pathod
from libmproxy.proxy.config import HostMatcher
from libmproxy.protocol import Kill
from libmproxy.models import Error, HTTPResponse
from . import tutils, tservers
"""
Note that the choice of response code in these tests matters more than you
might think. libcurl treats a 304 response code differently from, say, a
200 response code - it will correctly terminate a 304 response with no
content-length header, whereas it will block forever waiting for content
for a 200 response.
"""
class CommonMixin:
def test_large(self):
assert len(self.pathod("200:b@50k").content) == 1024 * 50
@staticmethod
def wait_until_not_live(flow):
"""
Race condition: We don't want to replay the flow while it is still live.
"""
s = time.time()
while flow.live:
time.sleep(0.001)
if time.time() - s > 5:
raise RuntimeError("Flow is live for too long.")
def test_replay(self):
assert self.pathod("304").status_code == 304
if isinstance(self, tservers.HTTPUpstreamProxTest) and self.ssl:
assert len(self.master.state.view) == 2
else:
assert len(self.master.state.view) == 1
l = self.master.state.view[-1]
assert l.response.status_code == 304
l.request.path = "/p/305"
self.wait_until_not_live(l)
rt = self.master.replay_request(l, block=True)
assert l.response.status_code == 305
# Disconnect error
l.request.path = "/p/305:d0"
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxTest):
assert l.response.status_code == 502
else:
assert l.error
# Port error
l.request.port = 1
# In upstream mode, we get a 502 response from the upstream proxy server.
# In upstream mode with ssl, the replay will fail as we cannot establish
# SSL with the upstream proxy.
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxTest):
assert l.response.status_code == 502
else:
assert l.error
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
# In Upstream mode with SSL, we may already have a previous CONNECT
# request.
l = self.master.state.view[-1]
assert l.client_conn.address
assert "host" in l.request.headers
assert l.response.status_code == 304
def test_invalid_http(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write("invalid\r\n\r\n")
t.wfile.flush()
line = t.rfile.readline()
assert ("Bad Request" in line) or ("Bad Gateway" in line)
def test_sni(self):
if not self.ssl:
return
f = self.pathod("304", sni="testserver.com")
assert f.status_code == 304
log = self.server.last_log()
assert log["request"]["sni"] == "testserver.com"
class TcpMixin:
def _ignore_on(self):
assert not hasattr(self, "_ignore_backup")
self._ignore_backup = self.config.check_ignore
self.config.check_ignore = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_ignore.patterns)
def _ignore_off(self):
assert hasattr(self, "_ignore_backup")
self.config.check_ignore = self._ignore_backup
del self._ignore_backup
def test_ignore(self):
n = self.pathod("304")
self._ignore_on()
i = self.pathod("305")
i2 = self.pathod("306")
self._ignore_off()
self.master.masterq.join()
assert n.status_code == 304
assert i.status_code == 305
assert i2.status_code == 306
assert any(f.response.status_code == 304 for f in self.master.state.flows)
assert not any(f.response.status_code == 305 for f in self.master.state.flows)
assert not any(f.response.status_code == 306 for f in self.master.state.flows)
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert
assert i_cert != n_cert
# Test Non-HTTP traffic
spec = "200:i0,@100:d0" # this results in just 100 random bytes
# mitmproxy responds with bad gateway
assert self.pathod(spec).status_code == 502
self._ignore_on()
with raises(HttpException):
self.pathod(spec) # pathoc tries to parse answer as HTTP
self._ignore_off()
def _tcpproxy_on(self):
assert not hasattr(self, "_tcpproxy_backup")
self._tcpproxy_backup = self.config.check_tcp
self.config.check_tcp = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_tcp.patterns)
def _tcpproxy_off(self):
assert hasattr(self, "_tcpproxy_backup")
self.config.check_tcp = self._tcpproxy_backup
del self._tcpproxy_backup
def test_tcp(self):
n = self.pathod("304")
self._tcpproxy_on()
i = self.pathod("305")
i2 = self.pathod("306")
self._tcpproxy_off()
self.master.masterq.join()
assert n.status_code == 304
assert i.status_code == 305
assert i2.status_code == 306
assert any(f.response.status_code == 304 for f in self.master.state.flows)
assert not any(f.response.status_code == 305 for f in self.master.state.flows)
assert not any(f.response.status_code == 306 for f in self.master.state.flows)
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert == n_cert
# Make sure that TCP messages are in the event log.
assert any("305" in m for m in self.master.log)
assert any("306" in m for m in self.master.log)
class AppMixin:
def test_app(self):
ret = self.app("/")
assert ret.status_code == 200
assert "mitmproxy" in ret.content
class TestHTTP(tservers.HTTPProxTest, CommonMixin, AppMixin):
def test_app_err(self):
p = self.pathoc()
ret = p.request("get:'http://errapp/'")
assert ret.status_code == 500
assert "ValueError" in ret.content
def test_invalid_connect(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write("CONNECT invalid\n\n")
t.wfile.flush()
assert "Bad Request" in t.rfile.readline()
def test_upstream_ssl_error(self):
p = self.pathoc()
ret = p.request("get:'https://localhost:%s/'" % self.server.port)
assert ret.status_code == 400
def test_connection_close(self):
# Add a body, so we have a content-length header, which combined with
# HTTP1.1 means the connection is kept alive.
response = '%s/p/200:b@1' % self.server.urlbase
# Lets sanity check that the connection does indeed stay open by
# issuing two requests over the same connection
p = self.pathoc()
assert p.request("get:'%s'" % response)
assert p.request("get:'%s'" % response)
# Now check that the connection is closed as the client specifies
p = self.pathoc()
assert p.request("get:'%s':h'Connection'='close'" % response)
# There's a race here, which means we can get any of a number of errors.
# Rather than introduce yet another sleep into the test suite, we just
# relax the Exception specification.
with raises(Exception):
p.request("get:'%s'" % response)
def test_reconnect(self):
req = "get:'%s/p/200:b@1:da'" % self.server.urlbase
p = self.pathoc()
assert p.request(req)
# Server has disconnected. Mitmproxy should detect this, and reconnect.
assert p.request(req)
assert p.request(req)
def test_get_connection_switching(self):
def switched(l):
for i in l:
if "serverdisconnect" in i:
return True
req = "get:'%s/p/200:b@1'"
p = self.pathoc()
assert p.request(req % self.server.urlbase)
assert p.request(req % self.server2.urlbase)
assert switched(self.proxy.log)
def test_blank_leading_line(self):
p = self.pathoc()
req = "get:'%s/p/201':i0,'\r\n'"
assert p.request(req % self.server.urlbase).status_code == 201
def test_invalid_headers(self):
p = self.pathoc()
resp = p.request("get:'http://foo':h':foo'='bar'")
assert resp.status_code == 400
def test_stream(self):
self.master.set_stream_large_bodies(1024 * 2)
self.pathod("200:b@1k")
assert not self.master.state.view[-1].response.stream
assert len(self.master.state.view[-1].response.content) == 1024 * 1
self.pathod("200:b@3k")
assert self.master.state.view[-1].response.stream
assert self.master.state.view[-1].response.content == CONTENT_MISSING
self.master.set_stream_large_bodies(None)
def test_stream_modify(self):
self.master.load_script(
tutils.test_data.path("scripts/stream_modify.py"))
d = self.pathod('200:b"foo"')
assert d.content == "bar"
self.master.unload_scripts()
class TestHTTPAuth(tservers.HTTPProxTest):
authenticator = http.authentication.BasicProxyAuth(
http.authentication.PassManSingleUser(
"test",
"test"),
"realm")
def test_auth(self):
assert self.pathod("202").status_code == 407
p = self.pathoc()
ret = p.request("""
get
'http://localhost:%s/p/202'
h'%s'='%s'
""" % (
self.server.port,
http.authentication.BasicProxyAuth.AUTH_HEADER,
authentication.assemble_http_basic_auth("basic", "test", "test")
))
assert ret.status_code == 202
class TestHTTPS(tservers.HTTPProxTest, CommonMixin, TcpMixin):
ssl = True
ssloptions = pathod.SSLOptions(request_client_cert=True)
def test_clientcert_file(self):
try:
self.config.clientcerts = os.path.join(
tutils.test_data.path("data/clientcert"), "client.pem")
f = self.pathod("304")
assert f.status_code == 304
assert self.server.last_log()["request"]["clientcert"]["keyinfo"]
finally:
self.config.clientcerts = None
def test_clientcert_dir(self):
try:
self.config.clientcerts = tutils.test_data.path("data/clientcert")
f = self.pathod("304")
assert f.status_code == 304
assert self.server.last_log()["request"]["clientcert"]["keyinfo"]
finally:
self.config.clientcerts = None
def test_error_post_connect(self):
p = self.pathoc()
assert p.request("get:/:i0,'invalid\r\n\r\n'").status_code == 400
class TestHTTPSCertfile(tservers.HTTPProxTest, CommonMixin):
ssl = True
certfile = True
def test_certfile(self):
assert self.pathod("304")
class TestHTTPSUpstreamServerVerificationWTrustedCert(tservers.HTTPProxTest):
"""
Test upstream server certificate verification with a trusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn="trusted-cert",
certs=[
("trusted-cert", tutils.test_data.path("data/trusted-server.crt"))
])
def test_verification_w_cadir(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_cadir_server = tutils.test_data.path(
"data/trusted-cadir/")
self.pathoc()
def test_verification_w_pemfile(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
self.pathoc()
class TestHTTPSUpstreamServerVerificationWBadCert(tservers.HTTPProxTest):
"""
Test upstream server certificate verification with an untrusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn="untrusted-cert",
certs=[
("untrusted-cert", tutils.test_data.path("data/untrusted-server.crt"))
])
def _request(self):
p = self.pathoc()
# We need to make an actual request because the upstream connection is lazy-loaded.
return p.request("get:/p/242")
def test_default_verification_w_bad_cert(self):
"""Should use no verification."""
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
assert self._request().status_code == 242
def test_no_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_NONE
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
assert self._request().status_code == 242
def test_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
assert self._request().status_code == 502
class TestHTTPSNoCommonName(tservers.HTTPProxTest):
"""
Test what happens if we get a cert without common name back.
"""
ssl = True
ssloptions = pathod.SSLOptions(
certs=[
("*", tutils.test_data.path("data/no_common_name.pem"))
]
)
def test_http(self):
f = self.pathod("202")
assert f.sslinfo.certchain[0].get_subject().CN == "127.0.0.1"
class TestReverse(tservers.ReverseProxTest, CommonMixin, TcpMixin):
reverse = True
class TestSocks5(tservers.SocksModeTest):
def test_simple(self):
p = self.pathoc()
p.socks_connect(("localhost", self.server.port))
f = p.request("get:/p/200")
assert f.status_code == 200
def test_with_authentication_only(self):
p = self.pathoc()
f = p.request("get:/p/200")
assert f.status_code == 502
assert "SOCKS5 mode failure" in f.content
def test_no_connect(self):
"""
mitmproxy doesn't support UDP or BIND SOCKS CMDs
"""
p = self.pathoc()
socks.ClientGreeting(
socks.VERSION.SOCKS5,
[socks.METHOD.NO_AUTHENTICATION_REQUIRED]
).to_file(p.wfile)
socks.Message(
socks.VERSION.SOCKS5,
socks.CMD.BIND,
socks.ATYP.DOMAINNAME,
("example.com", 8080)
).to_file(p.wfile)
p.wfile.flush()
p.rfile.read(2) # read server greeting
f = p.request("get:/p/200") # the request doesn't matter, error response from handshake will be read anyway.
assert f.status_code == 502
assert "SOCKS5 mode failure" in f.content
class TestHttps2Http(tservers.ReverseProxTest):
@classmethod
def get_proxy_config(cls):
d = super(TestHttps2Http, cls).get_proxy_config()
d["upstream_server"] = ("http", d["upstream_server"][1])
return d
def pathoc(self, ssl, sni=None):
"""
Returns a connected Pathoc instance.
"""
p = pathoc.Pathoc(
("localhost", self.proxy.port), ssl=True, sni=sni, fp=None
)
p.connect()
return p
def test_all(self):
p = self.pathoc(ssl=True)
assert p.request("get:'/p/200'").status_code == 200
def test_sni(self):
p = self.pathoc(ssl=True, sni="example.com")
assert p.request("get:'/p/200'").status_code == 200
assert all("Error in handle_sni" not in msg for msg in self.proxy.log)
def test_http(self):
p = self.pathoc(ssl=False)
assert p.request("get:'/p/200'").status_code == 200
class TestTransparent(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = False
def test_tcp_stream_modify(self):
self.master.load_script(
tutils.test_data.path("scripts/tcp_stream_modify.py"))
self._tcpproxy_on()
d = self.pathod('200:b"foo"')
self._tcpproxy_off()
assert d.content == "bar"
self.master.unload_scripts()
class TestTransparentSSL(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = True
def test_sslerr(self):
p = pathoc.Pathoc(("localhost", self.proxy.port), fp=None)
p.connect()
r = p.request("get:/")
assert r.status_code == 502
class TestProxy(tservers.HTTPProxTest):
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
f = self.master.state.view[0]
assert f.client_conn.address
assert "host" in f.request.headers
assert f.response.status_code == 304
@tutils.skip_appveyor
def test_response_timestamps(self):
# test that we notice at least 1 sec delay between timestamps
# in response object
f = self.pathod("304:b@1k:p50,1")
assert f.status_code == 304
response = self.master.state.view[0].response
# timestamp_start might fire a bit late, so we play safe and only require 300ms.
assert 0.3 <= response.timestamp_end - response.timestamp_start
@tutils.skip_appveyor
def test_request_timestamps(self):
# test that we notice a delay between timestamps in request object
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
# call pathod server, wait a second to complete the request
connection.send(
"GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n" %
self.server.port)
time.sleep(1)
connection.send("\r\n")
connection.recv(50000)
connection.close()
request, response = self.master.state.view[
0].request, self.master.state.view[0].response
assert response.status_code == 304 # sanity test for our low level request
# timestamp_start might fire a bit late, so we play safe and only require 300ms.
assert 0.3 <= request.timestamp_end - request.timestamp_start
def test_request_tcp_setup_timestamp_presence(self):
# tests that the client_conn a tcp connection has a tcp_setup_timestamp
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("localhost", self.proxy.port))
connection.send(
"GET http://localhost:%d/p/200:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send("\r\n")
# a bit hacky: make sure that we don't just read the headers only.
recvd = 0
while recvd < 1024:
recvd += len(connection.recv(5000))
connection.send(
"GET http://localhost:%d/p/200:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send("\r\n")
recvd = 0
while recvd < 1024:
recvd += len(connection.recv(5000))
connection.close()
first_flow = self.master.state.view[0]
second_flow = self.master.state.view[1]
assert first_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_ssl_setup is None
assert second_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_tcp_setup == second_flow.server_conn.timestamp_tcp_setup
def test_request_ip(self):
f = self.pathod("200:b@100")
assert f.status_code == 200
f = self.master.state.view[0]
assert f.server_conn.address == ("127.0.0.1", self.server.port)
class TestProxySSL(tservers.HTTPProxTest):
ssl = True
def test_request_ssl_setup_timestamp_presence(self):
# tests that the ssl timestamp is present when ssl is used
f = self.pathod("304:b@10k")
assert f.status_code == 304
first_flow = self.master.state.view[0]
assert first_flow.server_conn.timestamp_ssl_setup
class MasterRedirectRequest(tservers.TestMaster):
redirect_port = None # Set by TestRedirectRequest
def handle_request(self, f):
if f.request.path == "/p/201":
# This part should have no impact, but it should also not cause any exceptions.
addr = f.live.server_conn.address
addr2 = Address(("127.0.0.1", self.redirect_port))
f.live.set_server(addr2)
f.live.set_server(addr)
# This is the actual redirection.
f.request.port = self.redirect_port
super(MasterRedirectRequest, self).handle_request(f)
def handle_response(self, f):
f.response.content = str(f.client_conn.address.port)
f.response.headers["server-conn-id"] = str(f.server_conn.source_address.port)
super(MasterRedirectRequest, self).handle_response(f)
class TestRedirectRequest(tservers.HTTPProxTest):
masterclass = MasterRedirectRequest
ssl = True
def test_redirect(self):
"""
Imagine a single HTTPS connection with three requests:
1. First request should pass through unmodified
2. Second request will be redirected to a different host by an inline script
3. Third request should pass through unmodified
This test verifies that the original destination is restored for the third request.
"""
self.master.redirect_port = self.server2.port
p = self.pathoc()
self.server.clear_log()
self.server2.clear_log()
r1 = p.request("get:'/p/200'")
assert r1.status_code == 200
assert self.server.last_log()
assert not self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r2 = p.request("get:'/p/201'")
assert r2.status_code == 201
assert not self.server.last_log()
assert self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r3 = p.request("get:'/p/202'")
assert r3.status_code == 202
assert self.server.last_log()
assert not self.server2.last_log()
assert r1.content == r2.content == r3.content
class MasterStreamRequest(tservers.TestMaster):
"""
Enables the stream flag on the flow for all requests
"""
def handle_responseheaders(self, f):
f.response.stream = True
f.reply()
class TestStreamRequest(tservers.HTTPProxTest):
masterclass = MasterStreamRequest
def test_stream_simple(self):
p = self.pathoc()
# a request with 100k of data but without content-length
r1 = p.request("get:'%s/p/200:r:b@100k:d102400'" % self.server.urlbase)
assert r1.status_code == 200
assert len(r1.content) > 100000
def test_stream_multiple(self):
p = self.pathoc()
# simple request with streaming turned on
r1 = p.request("get:'%s/p/200'" % self.server.urlbase)
assert r1.status_code == 200
# now send back 100k of data, streamed but not chunked
r1 = p.request("get:'%s/p/201:b@100k'" % self.server.urlbase)
assert r1.status_code == 201
def test_stream_chunked(self):
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
fconn = connection.makefile()
spec = '200:h"Transfer-Encoding"="chunked":r:b"4\\r\\nthis\\r\\n11\\r\\nisatest__reachhex\\r\\n0\\r\\n\\r\\n"'
connection.send(
"GET %s/p/%s HTTP/1.1\r\n" %
(self.server.urlbase, spec))
connection.send("\r\n")
resp = http1.read_response_head(fconn)
assert resp.headers["Transfer-Encoding"] == 'chunked'
assert resp.status_code == 200
chunks = list(http1.read_body(fconn, None))
assert chunks == ["this", "isatest__reachhex"]
connection.close()
class MasterFakeResponse(tservers.TestMaster):
def handle_request(self, f):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
f.reply(resp)
class TestFakeResponse(tservers.HTTPProxTest):
masterclass = MasterFakeResponse
def test_fake(self):
f = self.pathod("200")
assert "header-response" in f.headers
class TestServerConnect(tservers.HTTPProxTest):
masterclass = MasterFakeResponse
no_upstream_cert = True
ssl = True
def test_unnecessary_serverconnect(self):
"""A replayed/fake response with no_upstream_cert should not connect to an upstream server"""
assert self.pathod("200").status_code == 200
for msg in self.proxy.tmaster.log:
assert "serverconnect" not in msg
class MasterKillRequest(tservers.TestMaster):
def handle_request(self, f):
f.reply(Kill)
class TestKillRequest(tservers.HTTPProxTest):
masterclass = MasterKillRequest
def test_kill(self):
with raises(HttpReadDisconnect):
self.pathod("200")
# Nothing should have hit the server
assert not self.server.last_log()
class MasterKillResponse(tservers.TestMaster):
def handle_response(self, f):
f.reply(Kill)
class TestKillResponse(tservers.HTTPProxTest):
masterclass = MasterKillResponse
def test_kill(self):
with raises(HttpReadDisconnect):
self.pathod("200")
# The server should have seen a request
assert self.server.last_log()
class EResolver(tservers.TResolver):
def original_addr(self, sock):
raise RuntimeError("Could not resolve original destination.")
class TestTransparentResolveError(tservers.TransparentProxTest):
resolver = EResolver
def test_resolve_error(self):
assert self.pathod("304").status_code == 502
class MasterIncomplete(tservers.TestMaster):
def handle_request(self, f):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.content = CONTENT_MISSING
f.reply(resp)
class TestIncompleteResponse(tservers.HTTPProxTest):
masterclass = MasterIncomplete
def test_incomplete(self):
assert self.pathod("200").status_code == 502
class TestUpstreamProxy(tservers.HTTPUpstreamProxTest, CommonMixin, AppMixin):
ssl = False
def test_order(self):
self.proxy.tmaster.replacehooks.add(
"~q",
"foo",
"bar") # replace in request
self.chain[0].tmaster.replacehooks.add("~q", "bar", "baz")
self.chain[1].tmaster.replacehooks.add("~q", "foo", "oh noes!")
self.chain[0].tmaster.replacehooks.add(
"~s",
"baz",
"ORLY") # replace in response
p = self.pathoc()
req = p.request("get:'%s/p/418:b\"foo\"'" % self.server.urlbase)
assert req.content == "ORLY"
assert req.status_code == 418
class TestUpstreamProxySSL(
tservers.HTTPUpstreamProxTest,
CommonMixin,
TcpMixin):
ssl = True
def _host_pattern_on(self, attr):
"""
Updates config.check_tcp or check_ignore, depending on attr.
"""
assert not hasattr(self, "_ignore_%s_backup" % attr)
backup = []
for proxy in self.chain:
old_matcher = getattr(
proxy.tmaster.server.config,
"check_%s" %
attr)
backup.append(old_matcher)
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
HostMatcher([".+:%s" % self.server.port] + old_matcher.patterns)
)
setattr(self, "_ignore_%s_backup" % attr, backup)
def _host_pattern_off(self, attr):
backup = getattr(self, "_ignore_%s_backup" % attr)
for proxy in reversed(self.chain):
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
backup.pop()
)
assert not backup
delattr(self, "_ignore_%s_backup" % attr)
def _ignore_on(self):
super(TestUpstreamProxySSL, self)._ignore_on()
self._host_pattern_on("ignore")
def _ignore_off(self):
super(TestUpstreamProxySSL, self)._ignore_off()
self._host_pattern_off("ignore")
def _tcpproxy_on(self):
super(TestUpstreamProxySSL, self)._tcpproxy_on()
self._host_pattern_on("tcp")
def _tcpproxy_off(self):
super(TestUpstreamProxySSL, self)._tcpproxy_off()
self._host_pattern_off("tcp")
def test_simple(self):
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert req.content == "content"
assert req.status_code == 418
# CONNECT from pathoc to chain[0],
assert self.proxy.tmaster.state.flow_count() == 2
# request from pathoc to chain[0]
# CONNECT from proxy to chain[1],
assert self.chain[0].tmaster.state.flow_count() == 2
# request from proxy to chain[1]
# request from chain[0] (regular proxy doesn't store CONNECTs)
assert self.chain[1].tmaster.state.flow_count() == 1
class TestProxyChainingSSLReconnect(tservers.HTTPUpstreamProxTest):
ssl = True
def test_reconnect(self):
"""
Tests proper functionality of ConnectionHandler.server_reconnect mock.
If we have a disconnect on a secure connection that's transparently proxified to
an upstream http proxy, we need to send the CONNECT request again.
"""
def kill_requests(master, attr, exclude):
k = [0] # variable scope workaround: put into array
_func = getattr(master, attr)
def handler(f):
k[0] += 1
if not (k[0] in exclude):
f.client_conn.finish()
f.error = Error("terminated")
f.reply(Kill)
return _func(f)
setattr(master, attr, handler)
kill_requests(self.chain[1].tmaster, "handle_request",
exclude=[
# fail first request
2, # allow second request
])
kill_requests(self.chain[0].tmaster, "handle_request",
exclude=[
1, # CONNECT
# fail first request
3, # reCONNECT
4, # request
])
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert req.content == "content"
assert req.status_code == 418
assert self.proxy.tmaster.state.flow_count() == 2 # CONNECT and request
# CONNECT, failing request,
assert self.chain[0].tmaster.state.flow_count() == 4
# reCONNECT, request
# failing request, request
assert self.chain[1].tmaster.state.flow_count() == 2
# (doesn't store (repeated) CONNECTs from chain[0]
# as it is a regular proxy)
assert not self.chain[1].tmaster.state.flows[0].response # killed
assert self.chain[1].tmaster.state.flows[1].response
assert self.proxy.tmaster.state.flows[0].request.form_in == "authority"
assert self.proxy.tmaster.state.flows[1].request.form_in == "relative"
assert self.chain[0].tmaster.state.flows[
0].request.form_in == "authority"
assert self.chain[0].tmaster.state.flows[
1].request.form_in == "relative"
assert self.chain[0].tmaster.state.flows[
2].request.form_in == "authority"
assert self.chain[0].tmaster.state.flows[
3].request.form_in == "relative"
assert self.chain[1].tmaster.state.flows[
0].request.form_in == "relative"
assert self.chain[1].tmaster.state.flows[
1].request.form_in == "relative"
req = p.request("get:'/p/418:b\"content2\"'")
assert req.status_code == 502
assert self.proxy.tmaster.state.flow_count() == 3 # + new request
# + new request, repeated CONNECT from chain[1]
assert self.chain[0].tmaster.state.flow_count() == 6
# (both terminated)
# nothing happened here
assert self.chain[1].tmaster.state.flow_count() == 2
|
|
# Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from collections import OrderedDict
from contextlib import contextmanager
import functools
import hashlib
import json
import os
import oslo_serialization
import random
import re
import six
import threading
import time
import traceback
import uuid
import weakref
from apicapi import apic_client
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
AIM_LOCK_PREFIX = 'aim_lock'
OPENSTACK_VMM_TYPE = 'OpenStack'
VMWARE_VMM_TYPE = 'VMware'
KNOWN_VMM_TYPES = {'openstack': OPENSTACK_VMM_TYPE,
'vmware': VMWARE_VMM_TYPE}
ACI_FAULT = 'faultInst'
def log(method):
"""Decorator helping to log method calls."""
_LOG = logging.getLogger(method.__module__)
@functools.wraps(method)
def wrapper(*args, **kwargs):
instance = args[0]
data = {"class_name": "%s.%s" % (instance.__class__.__module__,
instance.__class__.__name__),
"method_name": method.__name__,
"args": args[1:], "kwargs": kwargs}
_LOG.debug('%(class_name)s method %(method_name)s'
' called with arguments %(args)s %(kwargs)s', data)
return method(*args, **kwargs)
return wrapper
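# Illustrative use of the @log decorator above (a sketch, not part of the
# original module; ExampleManager is a hypothetical class):
#
#     class ExampleManager(object):
#         @log
#         def create(self, name, replicas=1):
#             return name
#
#     ExampleManager().create('web', replicas=3)
#     # logs: "...ExampleManager method create called with arguments
#     #        ('web',) {'replicas': 3}"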
# Common helper for sorting a list of dicts or a list of strings
# consistently; needed for Py3 support, where mixed-type comparison is gone
def deep_sort(obj):
if isinstance(obj, dict):
obj = OrderedDict(sorted(obj.items()))
for k, v in list(obj.items()):
if isinstance(v, dict) or isinstance(v, list):
obj[k] = deep_sort(v)
if isinstance(obj, list):
for i, v in enumerate(obj):
if isinstance(v, dict) or isinstance(v, list):
obj[i] = deep_sort(v)
obj = sorted(obj, key=lambda x: json.dumps(x))
return obj
def is_equal(obj1, obj2):
sorted_obj1 = deep_sort(obj1)
sorted_obj2 = deep_sort(obj2)
return sorted_obj1 == sorted_obj2
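# Examples (derived from the functions above): nested lists are sorted
# before comparison, so ordering differences do not matter.
#
#     is_equal({'a': [2, 1]}, {'a': [1, 2]})   # -> True
#     is_equal({'a': 1}, {'a': 2})             # -> False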
# In Py3, keyword 'cmp' is undefined
def cmp(obj1, obj2):
_cmp = lambda x, y: (x > y) - (x < y)
return _cmp(obj1, obj2)
def generate_uuid():
return str(uuid.uuid4())
def sleep(time_in_seconds):
time.sleep(time_in_seconds)
def wait_for_next_cycle(start_time, polling_interval, log, readable_caller='',
notify_exceeding_timeout=True):
# sleep till end of polling interval
elapsed = get_time() - start_time
log.debug("%(caller)s loop - completed in %(time).3f. ",
{'caller': readable_caller, 'time': elapsed})
if elapsed < polling_interval:
sleep(polling_interval - elapsed)
elif notify_exceeding_timeout:
log.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': polling_interval,
'elapsed': elapsed})
sleep(0)
else:
sleep(0)
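# Illustrative polling loop built around wait_for_next_cycle (a sketch,
# not original code; do_work is a hypothetical unit of work):
#
#     while True:
#         start_time = get_time()
#         do_work()
#         wait_for_next_cycle(start_time, 10, LOG, readable_caller='worker')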
class Counter(object):
def __init__(self, num=0):
self.num = num
def get(self):
return self.num
def increment(self):
self.num += 1
def get_backoff_time(max_time, tentative_number):
try:
return min(random.random() * (2 ** tentative_number), max_time)
except OverflowError:
return max_time
def exponential_backoff(max_time, tentative=None):
tentative = tentative or Counter()
sleep_time_secs = get_backoff_time(max_time, tentative.get())
LOG.debug('Sleeping for %s seconds' % sleep_time_secs)
sleep(sleep_time_secs)
tentative.increment()
return tentative
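# Illustrative retry pattern using exponential_backoff (a sketch; connect
# is a hypothetical flaky call). Reassigning the returned Counter is what
# makes the sleep intervals grow between attempts:
#
#     retries = None
#     while True:
#         try:
#             connect()
#             break
#         except Exception:
#             retries = exponential_backoff(60, tentative=retries)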
def perform_harakiri(log, message=None):
log.error("AAAAARGH!")
if message:
log.error(message)
if cfg.CONF.aim.recovery_restart:
os._exit(1)
def stob(s):
if s.lower() in ['true', 'yes', 't', 'y', '1']:
return True
if s.lower() in ['false', 'no', 'f', 'n', '0']:
return False
return None
def camel_to_snake(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def snake_to_lower_camel(name):
split = name.split('_')
return split[0] + ''.join(word.capitalize() for word in split[1:])
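# Examples of the conversions above:
#     camel_to_snake('faultInst')         # -> 'fault_inst'
#     snake_to_lower_camel('fault_inst')  # -> 'faultInst'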
def sanitize_name(type, *args):
# Updated due to PY3 error
# TypeError: Unicode-objects must be encoded before hashing
h = hashlib.sha256()
h.update(type.encode('utf-8'))
h.update('\x00'.encode('utf-8'))
for component in args:
h.update(component.encode('utf-8'))
h.update('\x00'.encode('utf-8'))
return (base64.b32encode(h.digest()).decode('utf-8').rstrip('=').lower())
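# Example: sanitize_name is deterministic, so the same (type, components)
# pair always maps to the same lowercase base32 string, which makes it
# safe to use as a stable generated name.
#     sanitize_name('bd', 'tenant1', 'net1')  # same output on every call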
class ThreadExit(Exception):
pass
class StopLoop(Exception):
pass
def retry_loop(max_wait, max_retries, name, fail=False, return_=False):
def wrap(func):
@functools.wraps(func)
def inner(*args, **kwargs):
recovery_retries = None
while True:
try:
res = func(*args, **kwargs)
if return_:
return res
recovery_retries = None
except StopLoop:
return
except ThreadExit as e:
raise e
except Exception as e:
LOG.error(traceback.format_exc())
recovery_retries = exponential_backoff(
max_wait, tentative=recovery_retries)
if recovery_retries.get() >= max_retries:
LOG.error("Exceeded max recovery retries for %s", name)
if fail:
perform_harakiri(LOG, str(e))
raise e
return inner
return wrap
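# Illustrative use of the retry_loop decorator (a sketch; sync_once and
# its body are hypothetical). With return_=True the first successful
# result is returned; otherwise the loop keeps running until StopLoop:
#
#     @retry_loop(max_wait=30, max_retries=5, name='sync', return_=True)
#     def sync_once(client):
#         return client.fetch_state()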
class FakeContext(object):
def __init__(self, store=None):
if store:
self.store = store
def decompose_dn(mo_type, dn):
try:
return apic_client.DNManager().aci_decompose_dn_guess(dn, mo_type)[1]
except (apic_client.DNManager.InvalidNameFormat, KeyError,
apic_client.cexc.ApicManagedObjectNotSupported, IndexError):
log_ = LOG.warning
if mo_type == 'faultDelegate':
log_ = LOG.debug
log_("Failed to transform DN %s to key for type %s" % (dn, mo_type))
return
def retrieve_fault_parent(fault_dn, resource_map):
    # fault_dn is the DN of the ACI fault instance
dn_mgr = apic_client.DNManager()
mos_rns = dn_mgr.aci_decompose_with_type(fault_dn, ACI_FAULT)[:-1]
rns = dn_mgr.filter_rns(mos_rns)
conv_info = None
step = -1
while conv_info is None or len(conv_info) > 1:
aci_klass = mos_rns[step][0]
conv_info = resource_map[aci_klass]
step -= 1
conv_info = conv_info[0]
klasses = [conv_info['resource']]
if conv_info.get('alt_resource'):
klasses.append(conv_info['alt_resource'])
parents = []
for klass in klasses:
a_obj = klass(**{y: rns[x]
for x, y in enumerate(klass.identity_attributes)})
parents.append(a_obj)
return parents
class ThreadKillTimeout(Exception):
message = "Thread kill timed out"
class AIMThread(object):
KILL_TIMEOUT = 10
def __init__(self, *args, **kwargs):
self._thread = None
self._stop = False
def start(self):
self._thread = spawn_thread(self.run)
return self
def run(self):
pass
def kill(self, wait=False, timeout=KILL_TIMEOUT):
if self._thread:
self._stop = True
if wait:
tentative = None
curr_time = get_time()
                while not self.dead and get_time() < curr_time + timeout:
                    tentative = exponential_backoff(timeout / 3, tentative)
                if not self.dead:
                    raise ThreadKillTimeout()
@property
def dead(self):
if self._thread:
return not self._thread.is_alive()
def spawn_thread(target, *args, **kwargs):
thd = threading.Thread(target=target, args=args, kwargs=kwargs)
thd.daemon = True
thd.start()
return thd
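# Illustrative AIMThread subclass (a sketch, not original code): run()
# should poll the _stop flag so that kill() can terminate the loop.
#
#     class HeartbeatThread(AIMThread):
#         def run(self):
#             while not self._stop:
#                 LOG.debug('heartbeat')
#                 sleep(1)
#
#     thread = HeartbeatThread().start()
#     thread.kill(wait=True)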
# Key/Values will be garbage collected once all references are lost
all_locks = weakref.WeakValueDictionary()
_master_lock = threading.Lock()
class LockNotAcquired(Exception):
pass
def generate_rlock(lock_name):
with _master_lock:
return all_locks.setdefault(lock_name, threading.RLock())
@contextmanager
def get_rlock(lock_name, blocking=True):
lock = generate_rlock(lock_name)
if not lock.acquire(blocking):
raise LockNotAcquired()
try:
yield lock
finally:
lock.release()
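# Illustrative use of get_rlock (a sketch; update_tenant is a hypothetical
# critical section). With blocking=False the context manager fails fast
# instead of waiting for the lock:
#
#     try:
#         with get_rlock(AIM_LOCK_PREFIX + 'tenant1', blocking=False):
#             update_tenant()
#     except LockNotAcquired:
#         LOG.debug('another worker holds the lock; skipping')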
def rlock(lock_name):
def wrap(func):
@functools.wraps(func)
def inner(*args, **kwargs):
# setdefault is atomic
lock = generate_rlock(lock_name)
# Too much output if we log this. However, it would be really
# useful to have a debug mode that show us which lock is held
# by which thread/method
try:
lock.acquire()
return func(*args, **kwargs)
finally:
lock.release()
return inner
return wrap
def _byteify(data, ignore_dicts=False):
if isinstance(data, six.text_type):
return data.encode('utf-8')
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value,
ignore_dicts=True)
for key, value in list(data.items())
}
return data
def json_loads(json_text):
    # _byteify converts the loaded unicode data back to bytes. Under Py2,
    # str and bytes are the same type, so this makes the result match
    # native strings; under Py3, str is already unicode, so no conversion
    # is needed.
if six.PY3:
return oslo_serialization.jsonutils.loads(json_text)
return _byteify(
oslo_serialization.jsonutils.loads(json_text, object_hook=_byteify),
ignore_dicts=True)
def json_dumps(obj):
    return oslo_serialization.jsonutils.dump_as_bytes(obj)
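# Example behavior of the JSON helpers above: decoded strings are bytes
# under Py2 (via _byteify) and remain unicode str under Py3, while
# json_dumps always returns bytes.
#
#     json_loads('{"a": "b"}')   # -> {'a': 'b'} (str type varies by Python)
#     json_dumps({'a': 'b'})     # -> b'{"a": "b"}'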
def schedule_next_event(interval, deviation):
return get_time() + interval + random.randrange(-interval * deviation,
interval * deviation)
def get_time():
return time.time()
|
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2010,2011 Patrick Crews
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" mysqld.py: code to allow a serverManager
to provision and start up a mysqld server object
for test execution
"""
# imports
import os
import sys
import subprocess
from lib.server_mgmt.server import Server
class mysqlServer(Server):
""" represents a mysql server, its possessions
(datadir, ports, etc), and methods for controlling
and querying it
TODO: create a base server class that contains
standard methods from which we can inherit
        Several of the current methods / attributes
        are general enough to belong there
"""
def __init__( self, name, server_manager, code_tree, default_storage_engine
, server_options, requester, test_executor, workdir_root):
super(mysqlServer, self).__init__( name
, server_manager
, code_tree
, default_storage_engine
, server_options
, requester
, test_executor
, workdir_root)
self.preferred_base_port = 9306
# client files
self.mysqldump = self.code_tree.mysqldump
self.mysqladmin = self.code_tree.mysqladmin
self.mysql_client = self.code_tree.mysql_client
self.mysqlimport = self.code_tree.mysqlimport
self.mysqlslap = self.code_tree.mysqlslap
self.server_path = self.code_tree.mysql_server
self.mysql_client_path = self.code_tree.mysql_client
# important stuff
self.langdir = self.code_tree.langdir
self.charsetdir = self.code_tree.charsetdir
self.bootstrap_file = self.code_tree.bootstrap_path
self.bootstrap_cmd = None
# Get our ports
self.port_block = self.system_manager.port_manager.get_port_block( self.name
, self.preferred_base_port
, 1 )
self.master_port = self.port_block[0]
# Generate our working directories
self.dirset = { self.name : { 'var': {'std_data_ln':( os.path.join(self.code_tree.testdir,'std_data'))
,'log':None
,'run':None
,'tmp':None
,'master-data': {'local': { 'test':None
, 'mysql':None
}
}
}
}
}
self.workdir = self.system_manager.create_dirset( workdir_root
, self.dirset)
self.vardir = os.path.join(self.workdir,'var')
self.tmpdir = os.path.join(self.vardir,'tmp')
self.rundir = os.path.join(self.vardir,'run')
self.logdir = os.path.join(self.vardir,'log')
self.std_data = os.path.join(self.vardir,'std_data_ln')
self.datadir = os.path.join(self.vardir,'master-data')
self.error_log = os.path.join(self.logdir,('%s.err' %(self.name)))
self.bootstrap_log = os.path.join(self.logdir,('bootstrap.log'))
self.pid_file = os.path.join(self.rundir,('%s.pid' %(self.name)))
self.socket_file = os.path.join(self.vardir, ('%s.sock' %(self.name)))
if len(self.socket_file) > 107:
# MySQL has a limitation of 107 characters for socket file path
# we copy the mtr workaround of creating one in /tmp
self.logging.verbose("Default socket file path: %s" %(self.socket_file))
self.socket_file = "/tmp/%s_%s.%s.sock" %(self.system_manager.uuid
,self.owner
,self.name)
self.logging.verbose("Changing to alternate: %s" %(self.socket_file))
self.timer_file = os.path.join(self.logdir,('timer'))
self.general_log_file = os.path.join(self.logdir,'mysqld.log')
self.slow_query_log_file = os.path.join(self.logdir,'mysqld-slow.log')
self.snapshot_path = os.path.join(self.tmpdir,('snapshot_%s' %(self.master_port)))
# We want to use --secure-file-priv = $vardir by default
# but there are times / tools when we need to shut this off
if self.no_secure_file_priv:
self.secure_file_string = ''
else:
self.secure_file_string = "--secure-file-priv='%s'" %(self.vardir)
self.user_string = '--user=root'
self.initialize_databases()
self.take_db_snapshot()
self.logging.debug_class(self)
def report(self):
""" We print out some general useful info """
report_values = [ 'name'
, 'master_port'
, 'socket_file'
, 'vardir'
, 'status'
]
self.logging.info("%s server:" %(self.owner))
for key in report_values:
value = vars(self)[key]
self.logging.info("%s: %s" %(key.upper(), value))
def initialize_databases(self):
""" Do the voodoo required to have a working database setup.
For MySQL, this is calling the server with the
--bootstrap argument. We generate the bootstrap
            file during codeTree initialization as the file is standard for
all MySQL servers that are spawned from a single codeTree
"""
# generate the bootstrap startup command
if not self.bootstrap_cmd:
mysqld_args = [ "--no-defaults"
, "--bootstrap"
, "--basedir=%s" %(self.code_tree.basedir)
, "--datadir=%s" %(self.datadir)
, "--loose-skip-falcon"
, "--loose-skip-ndbcluster"
, "--tmpdir=%s" %(self.tmpdir)
, "--core-file"
, "--lc-messages-dir=%s" %(self.langdir)
, "--character-sets-dir=%s" %(self.charsetdir)
]
# We add server_path into the mix this way as we
# may alter how we store / handle server args later
            mysqld_args = [self.server_path] + mysqld_args
self.bootstrap_cmd = " ".join(mysqld_args)
# execute our command
        with open(self.bootstrap_log, 'w') as bootstrap_log_file:
            bootstrap_subproc = subprocess.Popen( self.bootstrap_cmd
                                                , shell=True
                                                , stdout=bootstrap_log_file
                                                , stderr=bootstrap_log_file
                                                )
            bootstrap_subproc.wait()
bootstrap_retcode = bootstrap_subproc.returncode
if bootstrap_retcode:
self.logging.error("Received retcode: %s executing command: %s"
%(bootstrap_retcode, self.bootstrap_cmd))
self.logging.error("Check the bootstrap log: %s" %(self.bootstrap_log))
sys.exit(1)
def get_start_cmd(self):
""" Return the command string that will start up the server
as desired / intended
"""
server_args = [ self.process_server_options()
, "--open-files-limit=1024"
, "--local-infile"
, "--character-set-server=latin1"
, "--connect-timeout=60"
, "--log-bin-trust-function-creators=1"
, "--key_buffer_size=1M"
, "--sort_buffer=256K"
, "--max_heap_table_size=1M"
, "--query-cache-size=0"
, "--loose-innodb_data_file_path=ibdata1:10M:autoextend"
, "--loose-innodb_buffer_pool_size=32M"
, "--loose-innodb_write_io_threads=2"
, "--loose-innodb_read_io_threads=2"
, "--loose-innodb_log_buffer_size=1M"
, "--loose-innodb_log_file_size=5M"
, "--loose-innodb_additional_mem_pool_size=1M"
, "--loose-innodb_log_files_in_group=2"
, "--slave-net-timeout=120"
, "--log-bin=mysqld-bin"
, "--loose-enable-performance-schema"
, "--loose-performance-schema-max-mutex-instances=10000"
, "--loose-performance-schema-max-rwlock-instances=10000"
, "--loose-performance-schema-max-table-instances=500"
, "--loose-performance-schema-max-table-handles=1000"
, "--binlog-direct-non-transactional-updates"
, "--loose-enable-performance-schema"
, "--log-bin=master-bin"
, "--general_log=1"
, "--general_log_file=%s" %(self.general_log_file)
, "--slow_query_log=1"
, "--slow_query_log_file=%s" %(self.slow_query_log_file)
, "--basedir=%s" %(self.codetree.basedir)
, "--datadir=%s" %(self.datadir)
, "--tmpdir=%s" %(self.tmpdir)
, "--character-sets-dir=%s" %(self.charsetdir)
, "--lc-messages-dir=%s" %(self.langdir)
, "--ssl-ca=%s" %(os.path.join(self.std_data,'cacert.pem'))
, "--ssl-cert=%s" %(os.path.join(self.std_data,'server-cert.pem'))
, "--ssl-key=%s" %(os.path.join(self.std_data,'server-key.pem'))
, "--port=%d" %(self.master_port)
, "--mysql-protocol.connect-timeout=60"
, "--innodb.data-file-path=ibdata1:20M:autoextend"
, "--sort-buffer-size=256K"
, "--max-heap-table-size=1M"
, "--socket=%s" %(self.socket_file)
, "--pid-file=%s" %(self.pid_file)
, "--default-storage-engine=%s" %(self.default_storage_engine)
, "--server-id=%d" %(self.get_numeric_server_id)
, self.secure_file_string
, self.user_string
]
if self.gdb:
server_args.append('--gdb')
return self.system_manager.handle_gdb_reqs(self, server_args)
else:
return "%s %s %s & " % ( self.cmd_prefix
, self.server_path
, " ".join(server_args)
)
def get_stop_cmd(self):
""" Return the command that will shut us down """
return "%s --user=root --port=%d --connect-timeout=5 --silent --password= --shutdown " %(self.mysql_admin, self.master_port)
def get_ping_cmd(self):
"""Return the command string that will
ping / check if the server is alive
"""
return '%s --port=%d --user=root -hlocalhost --protocol=tcp -e ""' % (self.mysql_client_path, self.master_port)
def is_started(self):
""" Determine if the server is up and running -
this may vary from server type to server type
"""
# We experiment with waiting for a pid file to be created vs. pinging
# This is what test-run.pl does and it helps us pass logging_stats tests
# while not self.ping_server(server, quiet=True) and timer != timeout:
return self.system_manager.find_path( [self.pid_file]
, required=0)
|
|
from glob import glob
import unittest
import shutil
from irods.exception import CollectionDoesNotExist, DataObjectDoesNotExist
import astrogen
import os
import config
import makeflow_gen
import pdb
from os import path
from textwrap import dedent
from irods.session import iRODSSession
__irods_server_host__ = 'bitol.iplantcollaborative.org'
__irods_server_port__ = "1247"
__irods_server_zone__ = "iplant"
__test_dir__ = os.path.join(astrogen.__pkg_root__, os.pardir, 'tests')
# TODO
# * test for run_parameter_extraction to be sure all and only desired files
# are used
class TestAstrogen(unittest.TestCase):
def setUp(self):
astrogen.__batch_dir__ = os.path.join(__test_dir__, 'fits_files')
ag = astrogen.Astrogen()
# set parameters normally gotten from astrogen.cfg
ag.iplant_params = {
'host': 'bitol.iplantcollaborative.org',
'port': 1247,
'zone': 'iplant',
'iplant_path': '/iplant/home/david_sidi/astrometry/test_fits',
'iplant_write_path': '/iplant/home/david_sidi/astrometry/output'
}
ag.path_to_netpbm = '/home/u12/ericlyons/bin/newnetpbm/bin'
        ag.path_to_solve_field = '/gsfs1/xdisk/dsidi/midterm/astrometry.net-0.50/blind/solve-field'
self.ag = ag
def test_get_cleaned_data_objects(self):
cleaned_objs = self.ag._get_data_objects()
names = [obj.name for obj in cleaned_objs]
pass
# TODO update filenames here, and uncomment assertion
# correct_names = [
# Briol_1197Rhodesia_20140630_044345_TA_FITS.fit
# Briol_1197Rhodesia_20140630_044345_flatfield_TA_FITS.fit
# Briol_1197Rhodesia_20140630_053258_TA_FITS.fit
# Briol_1197Rhodesia_20140630_055229_TA_FITS.fit
# Briol_1197Rhodesia_20140630_055229_flatfield_TA_FITS.fit
# Briol_1197Rhodesia_20140704_045604_TA_FITS.fit
# Briol_1197Rhodesia_20140704_052116_TA_FITS.fit
# Briol_1197Rhodesia_20140704_053610_TA_FITS.fit
# Briol_1197Rhodesia_20140706_041323_TA_FITS.fit
# Briol_1197Rhodesia_20140706_042914_TA_FITS.fit
# Briol_1197Rhodesia_20140706_044914_TA_FITS.fit
# Briol_1241Dysona_20150214_010819_TA_FITS.fit
# Briol_1241Dysona_20150214_020103_TA_FITS.fit
# Briol_1241Dysona_20150214_022401_TA_FITS.fit
# ]
# self.assertListEqual(names, correct_names)
def test_solve_batch_astrometry(self):
# TODO this side-effects a lot, test for a run with a single FITS
# file in a test fits_files dir
self.ag._solve_batch_astrometry()
def test_batching(self):
full_dataset_ag = astrogen.Astrogen()
cleaned_objects = full_dataset_ag._get_data_objects()
current_batch_size = 0
for data_object in cleaned_objects:
if current_batch_size < full_dataset_ag.max_batch_size:
full_dataset_ag._add_to_local_batch(data_object)
                current_batch_size = sum(os.path.getsize(os.path.join(astrogen.__batch_dir__, f))
                                         for f in os.listdir(astrogen.__batch_dir__)) / 1024 ** 2
else:
# call astronomy.net stuff on this batch
print "DONE"
break # in test only, stop after first batch
self.assertLessEqual(current_batch_size, 100)
def test_logging(self):
try:
if path.exists(path.join(astrogen.__pkg_root__, os.pardir,
'resources', 'astrogen.log')):
assert True
except OSError:
assert False
def test_iplant_fetch(self):
self.test_coll_path = '/iplant/home/elyons/ACIC/midterm-Carl-Hergenrother'
# get user and pword from user entry when astrogen was constructed
user = self.ag.user
password = self.ag.password
self.sess = iRODSSession(host=__irods_server_host__,
port=__irods_server_port__,
user=user,
password=password,
zone=__irods_server_zone__)
self.test_coll = self.sess.collections.get(self.test_coll_path)
self.sess.cleanup()
def test_iplant_deposition(self):
# temporarily reset resources directory
orig_resources_dir = astrogen.__resources_dir__
astrogen.__resources_dir__ = __test_dir__
# add temporarily files with the extensions we're interested in
extensions = [
'fit',
'cfg',
'out',
'axy',
'xyls',
'match',
'new',
'rdls',
'solved'
]
# used again, so store filenames
filenames = ['test_file.' + extension for extension in extensions]
for fn in filenames:
filepath = os.path.join(astrogen.__resources_dir__, 'fits_files', fn)
try:
os.mknod(filepath)
except OSError as e : # if file exists
print "problem creating filename: {}. Continuing, hoping for the
best...".format(e)
continue
# try movin' 'em
self.ag._move_makeflow_solutions()
# check that they're in the store now
sess = self.ag._get_irods_session()
iplant_path = self.ag.iplant_params['iplant_write_path']
leaves = ('modified_fits', 'astrometrica_config_files',
'other_solution_files')
for leaf_dir in leaves:
dirpath = os.path.join(iplant_path, leaf_dir)
# check that dirs were created
try:
coll = sess.collections.get(dirpath)
except CollectionDoesNotExist:
self.fail('iPlant directory not created.')
# check that an object exists
try:
obj = coll.data_objects[0]
            except (IndexError, DataObjectDoesNotExist):
self.fail("Object not created in iPlant.")
finally:
# clean up temporary directories
coll.remove(recurse=True, force=True)
# return resources directory to its original value
astrogen.__resources_dir__ = orig_resources_dir
sess.cleanup()
def test_run_makeflow(self):
actual_stdout = self.ag._run_makeflow(os.path.join(astrogen.__output_dir__, 'makeflows', 'output.mf'))
actual_log_odds = actual_stdout[-12]
actual_RA_DEC = actual_stdout[-11]
actual_stdout_tail = actual_stdout[-9:]
correct_log_odds = 'log-odds ratio 113.642 (2.25997e+49), 19 match, 0 conflict, 67 distractors, 32 index.'
correct_RA_DEC = 'RA,Dec = (358.242,64.0045), pixel scale 2.05136 arcsec/pix.'
correct_stdout_tail = dedent("""\
Field 1: solved with index index-4207-03.fits.
Field 1 solved: writing to file ./Briol_2011UW158_20150727_061740_TA_FITS.solved to indicate this.
Field: Briol_2011UW158_20150727_061740_TA_FITS.fit
Field center: (RA,Dec) = (358.2, 64) deg.
Field center: (RA H:M:S, Dec D:M:S) = (23:52:58.233, +64:00:14.433).
Field size: 47.5702 x 35.535 arcminutes
Field rotation angle: up is -152.913 degrees E of N
Creating new FITS file "./Briol_2011UW158_20150727_061740_TA_FITS.new"...
""")
self.assertEqual(actual_log_odds, correct_log_odds)
self.assertEqual(actual_RA_DEC, correct_RA_DEC)
self.assertEqual(actual_stdout_tail, correct_stdout_tail)
def test_clear_generated_files(self):
makeflows_dir = os.path.join(astrogen.__output_dir__, 'makeflows')
fits_dir = os.path.join(astrogen.__resources_dir__, 'fits_files')
astrogen.Astrogen._clear_generated_files()
        self.assertListEqual(os.listdir(makeflows_dir), [])
        num_fits = len(glob(os.path.join(fits_dir, '*.fit')))
        self.assertEqual(num_fits, len(os.listdir(fits_dir)))
def run_run_makeflow(self):
# new ag, since the test one for this class uses tests/fits_files, but
# we want resources/fits_files
new_ag = astrogen.Astrogen()
new_ag._run_makeflow(os.path.join(astrogen.__output_dir__, 'makeflows', 'output.mf'))
def tearDown(self):
config_path = path.join(path.curdir, 'test_config.cfg')
try:
os.remove(config_path)
os.remove(os.path.join(astrogen.__resources_dir__, 'fits_files',
'Briol_2011UW158_20150727_061740_TA_FITS.fit'))
            all_files_in_makeflows_dir = glob(os.path.join(astrogen.__output_dir__,
                                                           'makeflows', '*'))
            for f in all_files_in_makeflows_dir:
                os.remove(f)
except OSError:
pass
class TestMakeflowGen(unittest.TestCase):
def run_batch_makeflow_gen(self):
"""Makes a makeflow script for a batch of test fits files"""
# fits_filenames = ['Briol_1197Rhodesia_20140630_044345_flatfield_TA_FITS.fit']
fits_filenames = os.listdir(os.path.join(__test_dir__, 'fits_files'))
# temporarily copy the test files to resources/fits_files
for filename in fits_filenames:
abs_file_path = os.path.join(__test_dir__, 'fits_files', filename)
shutil.copy(abs_file_path, os.path.join(astrogen.__resources_dir__, 'fits_files'))
# TODO get these from config file in tests dir
path_to_netpbm = '/home/u12/ericlyons/bin/newnetpbm/bin'
path_to_solve_field = '/gsfs1/xdisk/dsidi/midterm/astrometry.net-0.50/blind/solve-field'
makeflow_gen.makeflow_gen(fits_filenames, path_to_solve_field, path_to_netpbm)
def test_makeflow_gen(self):
fits_filenames = ['Briol_1197Rhodesia_20140630_044345_flatfield_TA_FITS.fit']
# temporarily copy the test file to resources/fits_files
abs_file_path = os.path.join(__test_dir__, 'fits_files', fits_filenames[0])
shutil.copy(abs_file_path, os.path.join(astrogen.__resources_dir__, 'fits_files'))
# TODO get these from config file in tests dir
path_to_netpbm = '/home/u12/ericlyons/bin/newnetpbm/bin'
path_to_solve_field = '/gsfs1/xdisk/dsidi/midterm/astrometry.net-0.50/blind/solve-field'
makeflow_gen.makeflow_gen(fits_filenames, path_to_solve_field, path_to_netpbm)
##
# get actual output of makeflow_gen
#
makeflow_path = os.path.join(astrogen.__output_dir__, 'makeflows', 'output.mf')
with open(makeflow_path) as f:
actual_output = f.read()
##
# get correct output
#
correct_output_filename = 'Briol_1197Rhodesia_20140630_044345_flatfield_TA_FITS.out'
correct_fits_file_abs_path = \
os.path.join(astrogen.__resources_dir__, 'fits_files',
'Briol_1197Rhodesia_20140630_044345_flatfield_TA_FITS.fit')
correct_cfg_path = os.path.join(astrogen.__resources_dir__, 'astrometry.cfg')
solve_field_fixed_params =\
'-g ' \
'-u app ' \
'-L 0.3 ' \
'-p ' \
'--cpulimit 600 ' \
'--wcs none ' \
'--corr none ' \
'--scamp-ref none ' \
'--pnm none ' \
'-H 3.0'
correct_output = dedent("""\
export PATH={netpbm_loc}:$PATH
{output_filename} : {fits_file_loc} {solve_field_path}
\tmodule load python && {solve_field_path} {solve_field_params} --backend-config {config_loc} --overwrite {fits_file_loc} > {output_filename}
""".format(netpbm_loc=path_to_netpbm,
solve_field_loc=path_to_solve_field,
output_filename=correct_output_filename,
config_loc=correct_cfg_path,
fits_file_loc=correct_fits_file_abs_path,
solve_field_path=path_to_solve_field,
solve_field_params=solve_field_fixed_params)
)
self.assertEqual(actual_output, correct_output)
class TestConfig(unittest.TestCase):
def test_config(self):
config_str = dedent('''\
label:
{
key1 : 'value1',
key2 : 17
}
''')
correct_key1 = 'value1'
correct_key2 = 17
with open('test_config.cfg', 'w') as f:
f.write(config_str)
with open('test_config.cfg', 'r') as f:
cfg = config.Config(f)
key1 = cfg.label.key1
key2 = cfg.label.key2
self.assertEqual(key1, correct_key1)
self.assertEqual(key2, correct_key2)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
import discord
from yapsy.PluginManager import PluginManager
from plugin import Plugin
import os
import logging
import urllib.request
import inspect
import commands
import botcommands
if not discord.opus.is_loaded():
discord.opus.load_opus('opus')
# Plugin Command dictionary and decorator
plugincommand = commands._loadCommands()
# Bot Command dictionary and decorator
botcommand = commands._loadCommands()
class Bot(discord.Client):
def __init__(self):
super().__init__()
self.settingsChannelName = 'botsettings' # change this if you want your bot settings channel to have a different name
self.voiceChannel = None
self.voiceStarter = None
self.botCommands = botcommands.BotCommands()
# Setup logging for Discord.py
self.discordLogger = logging.getLogger('discord')
self.discordLogger.setLevel(logging.DEBUG)
self.discordHandler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
self.discordHandler.setFormatter(logging.Formatter('[%(asctime)s] [%(levelname)s] [%(name)s]: %(message)s'))
self.discordLogger.addHandler(self.discordHandler)
self.logger = logger
# Create Plugin Manager
self.pluginManager = PluginManager(categories_filter={"Plugins": Plugin})
self.pluginManager.setPluginPlaces(["plugins"])
def isAdmin(self, user):
for role in user.roles:
if role.permissions.administrator:
return True
return False
async def reloadPlugins(self):
# Load Plugins
self.pluginManager.locatePlugins()
self.pluginManager.loadPlugins()
        # Uncomment to see the full command registry logged. WARNING: there is a lot of text
#logger.debug('Registry: %s', str(plugincommand.registry))
# TODO: Make this a little more multi-server friendly
disabledplugins = []
settings = await self.getSettingsForTag(list(self.servers)[0], botcommands.BotCommands.TAG)
if settings is not None and 'disabledplugins' in settings:
disabledplugins = settings['disabledplugins'].split(',')
        if len(disabledplugins) == 1 and disabledplugins[0] == '':
disabledplugins = []
for plugin in self.pluginManager.getPluginsOfCategory("Plugins"):
            # Give the plugin its dictionary of commands (so it doesn't need to look them up later)
pluginCls = plugin.plugin_object.__class__.__name__
if pluginCls in plugincommand.registry:
plugin.plugin_object.parsedCommands = plugincommand.registry[pluginCls]
else:
plugin.plugin_object.parsedCommands = {}
await plugin.plugin_object._init(plugin.name, self)
logger.debug('%s Initialized!', plugin.name)
for _, _disabledplugin in enumerate(disabledplugins):
if plugin.plugin_object.tag == _disabledplugin or plugin.plugin_object.shortTag == _disabledplugin:
plugin.plugin_object.isDisabled = True
plugin.plugin_object.isDisabledPermanently = True
self.commandCollisions = {}
self.getCommandCollisions()
def getCommandCollisions(self):
# Generate a dictionary of tags & commands that collide
for outer in self.pluginManager.getPluginsOfCategory("Plugins"):
for inner in self.pluginManager.getPluginsOfCategory("Plugins"):
if outer.name == inner.name:
continue
# This should NEVER happen
if outer.plugin_object.tag == inner.plugin_object.tag:
logger.error('Plugin Tag Collision! Tag: [%s]', outer.plugin_object.tag)
# Check their commands to see if there is collision as well
if outer.plugin_object.shortTag == inner.plugin_object.shortTag:
logger.warning('Plugin Short Tag Collision! Short Tag: [%s]', outer.plugin_object.shortTag)
for com in outer.plugin_object.parsedCommands:
if com in inner.plugin_object.parsedCommands:
logger.warning('Plugin Command Collision! Command: [%s]', com)
if outer.plugin_object.shortTag not in self.commandCollisions:
self.commandCollisions[outer.plugin_object.shortTag] = []
self.commandCollisions[outer.plugin_object.shortTag].append(com)
async def shutdownPlugins(self):
for plugin in self.pluginManager.getPluginsOfCategory("Plugins"):
await plugin.plugin_object.shutdown()
logger.debug('Shutdown %s', plugin.name)
if self.voiceChannel != None:
await self.voiceChannel.disconnect()
async def on_ready(self):
print('Logged in as')
print(self.user.name)
print(self.user.id)
print('-----------')
logger.debug('Logged in as [%s] [%s]', self.user.name, self.user.id)
botCmdCls = self.botCommands.__class__.__name__
if botCmdCls in botcommand.registry:
self.botCommands.parsedCommands = botcommand.registry[botCmdCls]
await self.botCommands._init(bot=self)
await self.reloadPlugins()
print('Plugins initialized!')
def isBotCommand(self, command):
return self.botCommands.isCommand(command)
async def on_message(self, message):
if message.author == self.user:
return
if not await self._executeBotCommand(message.content, message.server, message.channel, message.author):
await self._readMessage(message)
if not await self._checkForCommandCollisions(message.content, message.channel):
await self._executePluginCommand(message.content, message.server, message.channel, message.author)
def _parseTextForCommandInfo(self, text):
results = {}
# If we have a command, extract the command and parameters (if any)
if text.startswith('!'):
tmp = text.split(' ', 1)
tmp[0] = tmp[0][1:] # get rid of !
content = ''
# Grab the rest of the string
if len(tmp) > 1:
content = tmp[1]
results['tag'] = tmp[0]
results['content'] = content
tmp = content.split(' ', 1)
results['command'] = tmp[0]
if len(tmp) > 1:
results['args'] = tmp[1]
else:
results['args'] = ''
return results
return None
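    # Worked example of the parsing above (hypothetical message):
    # '!music play despacito' yields
    #     {'tag': 'music', 'content': 'play despacito',
    #      'command': 'play', 'args': 'despacito'}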
async def _executeBotCommand(self, text, _server, _channel, _author, **kwargs):
commandInfo = self._parseTextForCommandInfo(text)
if commandInfo is not None:
tag = commandInfo['tag']
content = commandInfo['content']
# If we have a bot command, execute it
if self.isBotCommand(tag):
index, args = self.botCommands.parseCommandArgs(tag, content)
if index == -1:
await self.send_message(_channel, 'Incorrect use of command `{}`. Please see the usage to learn how to properly use this command.\nJust Type: `!usage {}`'.format(tag, tag))
return True
await self.botCommands.executeCommand(index, args, **kwargs, command=tag, server=_server, channel=_channel, author=_author)
return True
# This isn't a bot command and there was nothing after it. This must be an unrecognized command.
elif content == '':
await self.send_message(_channel, 'That is not a recognized command. For help, please try `!help`')
return True
# Otherwise, assume it is a plugin command
return False
async def _checkForCommandCollisions(self, text, channel):
# Check for tag/command collisions here and resolve before letting the plugins handle it
for key, array in self.commandCollisions.items():
for value in array:
if text.startswith('!' + key + ' ' + value):
logger.debug('There is more than one plugin with this short tag and command combination. Please use the full tag.')
await self.send_message(channel, 'There is more than one plugin with this short tag and command combination. Please use the full tag.')
return True
return False
async def _readMessage(self, message):
# Go through each of the plugins and let them read the message
for plugin in self.pluginManager.getPluginsOfCategory("Plugins"):
if not plugin.plugin_object.isDisabled and plugin.plugin_object.isReadingMessages():
await plugin.plugin_object.readMessage(message)
async def _executePluginCommand(self, text, _server, _channel, _author, **kwargs):
commandInfo = self._parseTextForCommandInfo(text)
if commandInfo is not None:
tag = commandInfo['tag']
command = commandInfo['command']
args = commandInfo['args']
found = False
# Go through each of the plugins and see if they can execute the command
for plugin in self.pluginManager.getPluginsOfCategory("Plugins"):
# Check if a plugin can handle the command and execute it if they can
if tag != None and command != None and plugin.plugin_object.isCommand(tag, command):
if plugin.plugin_object.isDisabled:
found = True
await self.send_message(_channel, 'This plugin is currently disabled. To use commands, please enable it first.')
continue
index, temp = plugin.plugin_object.parseCommandArgs(command, args)
tag = plugin.plugin_object.tag # Update the tag for better feedback
if index == -1:
await self.send_message(_channel, 'Incorrect use of command `{}`. Please see the usage to learn how to properly use this command.\nJust type: `!usage {} {}`'.format(command, tag, command))
return
await plugin.plugin_object.executeCommand(index, temp, command=command, server=_server, channel=_channel, author=_author, **kwargs)
found = True
if not found and text.startswith('!'):
await self.send_message(_channel, 'That is not a recognized command. For help, please try `!help`')
def download_image(self, imgUrl, filename):
try:
logger.debug('[download_image]: Opening url')
with urllib.request.urlopen(imgUrl) as imageOnWeb:
logger.debug('[download_image]: Checking if url is image')
if imageOnWeb.info()['Content-Type'].startswith('image'):
logger.debug('[download_image]: Reading Image')
buf = imageOnWeb.read()
logger.debug('[download_image]: Creating file [%s]', os.getcwd() + '/' + filename)
downloadedImage = open(os.getcwd() + '/' + filename, 'wb')
logger.debug('[download_image]: Writing Image')
downloadedImage.write(buf)
downloadedImage.close()
imageOnWeb.close()
else:
logger.debug('[download_image]: Image URL is not an image')
return False
        except Exception:
logger.debug('[download_image]: Something failed while reading or writing the image')
return False
logger.debug('[download_image]: Successfully downloaded image')
return True
# Private helper for Settings API
async def _createSettingsChannel(self, server):
logger.debug('No settings found in server %s, creating settings.', server)
return await self.create_channel(server, self.settingsChannelName)
# Private helper for Settings API
async def _getSettingsFromChannel(self, channel):
# Create a dictionary from the settings and return it
result = {}
async for message in self.logs_from(channel, limit=1000000):
if message.content.startswith('_'):
temp = message.content.split('=', 1)[0][1:]
tag = temp.split(':', 1)[0]
key = temp.split(':', 1)[1]
value = message.content.split('=', 1)[1]
if tag not in result:
result[tag] = {}
result[tag][key] = value
return result
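    # Example of the settings wire format parsed above (made-up values):
    # a channel message '_music:volume=0.5' becomes
    #     result['music']['volume'] = '0.5'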
# Private helper for Settings API
async def _getSettingsFromChannelForTag(self, channel, plugintag):
# Create a dictionary from the settings and return it
result = {}
async for message in self.logs_from(channel, limit=1000000):
if message.content.startswith('_'):
temp = message.content.split('=', 1)[0][1:]
tag = temp.split(':', 1)[0]
key = temp.split(':', 1)[1]
if tag != plugintag:
continue
value = message.content.split('=', 1)[1]
result[key] = value
return result
# Private helper for Settings API
async def _getMessageFromSettings(self, channel, tag, key):
async for message in self.logs_from(channel, limit=1000000):
if message.content.startswith('_' + tag + ':' + key):
return message
return None
# Private helper for Settings API
async def _getSettingsChannel(self, server):
channel = discord.utils.get(server.channels, name=self.settingsChannelName, type=discord.ChannelType.text)
if channel == None:
channel = await self._createSettingsChannel(server)
return channel
# Private helper for Settings API
async def _createSetting(self, channel, tag, key, value):
logger.debug('Creating Setting [%s:%s] with value [%s]', tag, key, value)
await self.send_message(channel, '_{}:{}={}'.format(str(tag), str(key), str(value)))
# Private helper for Settings API
async def _modifySetting(self, message, tag, key, value):
logger.debug('Modifying Setting [%s:%s] with value [%s]', tag, key, value)
await self.edit_message(message, '_{}:{}={}'.format(str(tag), str(key), str(value)))
# Private helper for Settings API
async def _deleteSetting(self, message):
logger.debug('Deleting Setting')
await self.delete_message(message)
# Gets the settings object from the server
# Settings object structure:
# object[plugintag][settingname] = settingvalue
async def getSettings(self, server):
for srv in self.servers:
if srv != server:
continue
channel = await self._getSettingsChannel(srv)
logger.debug('Settings constructed for server %s.', srv)
return await self._getSettingsFromChannel(channel)
logger.debug('The bot is not part of server %s!', server)
return None
# Gets the settings object from the server for a specific plugin tag
# Settings object structure:
# object[settingname] = settingvalue
async def getSettingsForTag(self, server, tag):
for srv in self.servers:
if srv != server:
continue
channel = await self._getSettingsChannel(srv)
logger.debug('Settings constructed for server %s.', srv)
return await self._getSettingsFromChannelForTag(channel, tag)
logger.debug('The bot is not part of server %s!', server)
return None
# Modifies the setting if it exists and creates it if it doesn't
async def modifySetting(self, server, tag, key, value):
for srv in self.servers:
if srv != server:
continue
channel = await self._getSettingsChannel(srv)
message = await self._getMessageFromSettings(channel, tag, key)
if message == None:
await self._createSetting(channel, tag, key, value)
else:
await self._modifySetting(message, tag, key, value)
return
logger.debug('The bot is not part of server %s!', server)
# Deletes the setting
async def deleteSetting(self, server, tag, key):
for srv in self.servers:
if srv != server:
continue
channel = await self._getSettingsChannel(srv)
message = await self._getMessageFromSettings(channel, tag, key)
if message != None:
await self._deleteSetting(message)
return
logger.debug('The bot is not part of server %s!', server)
# Returns whether the server has a specific setting
async def hasSetting(self, server, tag, key):
for srv in self.servers:
if srv != server:
continue
channel = await self._getSettingsChannel(srv)
message = await self._getMessageFromSettings(channel, tag, key)
if message == None:
return False
else:
return True
logger.debug('The bot is not part of server %s!', server)
return False
# Returns the user's object if it exists. The username can be their discord name or server nickname
def getUserFromName(self, server, username):
user = None
for member in server.members:
if member.display_name == username:
user = member
break
if not user:
user = discord.utils.get(server.members, name=username)
if not user:
return None
return user
if __name__ == "__main__":
client = discord.Client()
# Setup logging for the bot
logger = logging.getLogger('styrobot')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='styrobot.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('[%(asctime)s] [%(levelname)s] [%(name)s]: %(message)s'))
logger.addHandler(handler)
styrobot = Bot()
f = open('credentials.txt', 'r')
creds = f.read().splitlines()
email = creds[0]
password = creds[1]
f.close()
#styrobot.run(email, password)
styrobot.run(password)
|
|
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance Cinder Volume Driver
"""
import ast
import base64
from oslo_config import cfg
from oslo_log import log
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.zfssa import zfssarest
from cinder.volume import volume_types
import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task
CONF = cfg.CONF
LOG = log.getLogger(__name__)
ZFSSA_OPTS = [
cfg.StrOpt('zfssa_pool',
help='Storage pool name.'),
cfg.StrOpt('zfssa_project',
help='Project name.'),
cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
choices=['512', '1k', '2k', '4k', '8k', '16k', '32k', '64k',
'128k'],
help='Block size.'),
cfg.BoolOpt('zfssa_lun_sparse', default=False,
help='Flag to enable sparse (thin-provisioned): True, False.'),
cfg.StrOpt('zfssa_lun_compression', default='off',
choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
help='Data compression.'),
cfg.StrOpt('zfssa_lun_logbias', default='latency',
choices=['latency', 'throughput'],
help='Synchronous write bias.'),
cfg.StrOpt('zfssa_initiator_group', default='',
help='iSCSI initiator group.'),
cfg.StrOpt('zfssa_initiator', default='',
help='iSCSI initiator IQNs. (comma separated)'),
cfg.StrOpt('zfssa_initiator_user', default='',
help='iSCSI initiator CHAP user (name).'),
cfg.StrOpt('zfssa_initiator_password', default='',
help='Secret of the iSCSI initiator CHAP user.', secret=True),
cfg.StrOpt('zfssa_initiator_config', default='',
help='iSCSI initiators configuration.'),
cfg.StrOpt('zfssa_target_group', default='tgt-grp',
help='iSCSI target group name.'),
cfg.StrOpt('zfssa_target_user', default='',
help='iSCSI target CHAP user (name).'),
cfg.StrOpt('zfssa_target_password', default='', secret=True,
help='Secret of the iSCSI target CHAP user.'),
cfg.StrOpt('zfssa_target_portal',
help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'),
cfg.StrOpt('zfssa_target_interfaces',
help='Network interfaces of iSCSI targets. (comma separated)'),
cfg.IntOpt('zfssa_rest_timeout',
help='REST connection timeout. (seconds)'),
cfg.StrOpt('zfssa_replication_ip', default='',
               help='IP address used for replication data. (may be the same '
                    'as data ip)')
]
CONF.register_opts(ZFSSA_OPTS)
ZFSSA_LUN_SPECS = {
'zfssa:volblocksize',
'zfssa:sparse',
'zfssa:compression',
'zfssa:logbias',
}
def factory_zfssa():
return zfssarest.ZFSSAApi()
class ZFSSAISCSIDriver(driver.ISCSIDriver):
"""ZFSSA Cinder iSCSI volume driver.
Version history:
1.0.1: Backend enabled volume migration.
"""
VERSION = '1.0.1'
protocol = 'iSCSI'
def __init__(self, *args, **kwargs):
super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(ZFSSA_OPTS)
self.configuration.append_config_values(san.san_opts)
self.zfssa = None
self.tgt_zfssa = None
self._stats = None
self.tgtiqn = None
def _get_target_alias(self):
"""return target alias."""
return self.configuration.zfssa_target_group
def do_setup(self, context):
"""Setup - create multiple elements.
Project, initiators, initiatorgroup, target and targetgroup.
"""
lcfg = self.configuration
LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)
self.zfssa = factory_zfssa()
self.tgt_zfssa = factory_zfssa()
self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
auth_str = base64.encodestring('%s:%s' %
(lcfg.san_login,
lcfg.san_password))[:-1]
self.zfssa.login(auth_str)
self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
compression=lcfg.zfssa_lun_compression,
logbias=lcfg.zfssa_lun_logbias)
if (lcfg.zfssa_initiator_config != ''):
initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
for initiator_group in initiator_config:
zfssa_initiator_group = initiator_group
for zfssa_initiator in initiator_config[zfssa_initiator_group]:
self.zfssa.create_initiator(zfssa_initiator['iqn'],
zfssa_initiator_group + '-' +
zfssa_initiator['iqn'],
chapuser=
zfssa_initiator['user'],
chapsecret=
zfssa_initiator['password'])
if (zfssa_initiator_group != 'default'):
self.zfssa.add_to_initiatorgroup(
zfssa_initiator['iqn'],
zfssa_initiator_group)
else:
LOG.warning(_LW('zfssa_initiator_config not found. '
'Using deprecated configuration options.'))
if (lcfg.zfssa_initiator != '' and
(lcfg.zfssa_initiator_group == '' or
lcfg.zfssa_initiator_group == 'default')):
LOG.warning(_LW('zfssa_initiator: %(ini)s'
' wont be used on '
'zfssa_initiator_group= %(inigrp)s.'),
{'ini': lcfg.zfssa_initiator,
'inigrp': lcfg.zfssa_initiator_group})
# Setup initiator and initiator group
if (lcfg.zfssa_initiator != '' and
lcfg.zfssa_initiator_group != '' and
lcfg.zfssa_initiator_group != 'default'):
for initiator in lcfg.zfssa_initiator.split(','):
self.zfssa.create_initiator(
initiator, lcfg.zfssa_initiator_group + '-' +
initiator, chapuser=lcfg.zfssa_initiator_user,
chapsecret=lcfg.zfssa_initiator_password)
self.zfssa.add_to_initiatorgroup(
initiator, lcfg.zfssa_initiator_group)
# Parse interfaces
interfaces = []
for interface in lcfg.zfssa_target_interfaces.split(','):
if interface == '':
continue
interfaces.append(interface)
# Setup target and target group
iqn = self.zfssa.create_target(
self._get_target_alias(),
interfaces,
tchapuser=lcfg.zfssa_target_user,
tchapsecret=lcfg.zfssa_target_password)
self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
def check_for_setup_error(self):
"""Check that driver can login.
Check also pool, project, initiators, initiatorgroup, target and
targetgroup.
"""
lcfg = self.configuration
self.zfssa.verify_pool(lcfg.zfssa_pool)
self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)
if (lcfg.zfssa_initiator_config != ''):
initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
for initiator_group in initiator_config:
zfssa_initiator_group = initiator_group
for zfssa_initiator in initiator_config[zfssa_initiator_group]:
self.zfssa.verify_initiator(zfssa_initiator['iqn'])
else:
if (lcfg.zfssa_initiator != '' and
lcfg.zfssa_initiator_group != '' and
lcfg.zfssa_initiator_group != 'default'):
for initiator in lcfg.zfssa_initiator.split(','):
self.zfssa.verify_initiator(initiator)
self.zfssa.verify_target(self._get_target_alias())
def _get_provider_info(self, volume, lun=None):
"""Return provider information."""
lcfg = self.configuration
if lun is None:
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'])
if isinstance(lun['number'], list):
lun['number'] = lun['number'][0]
if self.tgtiqn is None:
self.tgtiqn = self.zfssa.get_target(self._get_target_alias())
loc = "%s %s %s" % (lcfg.zfssa_target_portal, self.tgtiqn,
lun['number'])
LOG.debug('_get_provider_info: provider_location: %s', loc)
provider = {'provider_location': loc}
if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
provider['provider_auth'] = ('CHAP %s %s' %
(lcfg.zfssa_target_user,
lcfg.zfssa_target_password))
return provider
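    # Illustrative shape of the returned dict (made-up values):
    #     {'provider_location': '10.0.0.1:3260 iqn.1986-03.com.sun:02:abc 0',
    #      'provider_auth': 'CHAP chapuser chapsecret'}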
def create_volume(self, volume):
"""Create a volume on ZFSSA."""
        LOG.debug('zfssa.create_volume: volume=%s', volume['name'])
lcfg = self.configuration
volsize = str(volume['size']) + 'g'
specs = self._get_voltype_specs(volume)
self.zfssa.create_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
volsize,
lcfg.zfssa_target_group,
specs)
def delete_volume(self, volume):
"""Deletes a volume with the given volume['name']."""
LOG.debug('zfssa.delete_volume: name=%s', volume['name'])
lcfg = self.configuration
try:
lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'])
except exception.VolumeBackendAPIException as ex:
# NOTE(jdg): This will log an error and continue
# if for some reason the volume no longer exists
# on the backend
if 'Error Getting Volume' in ex.message:
LOG.error(_LE("Volume ID %s was not found on "
"the zfssa device while attempting "
"delete_volume operation."), volume['id'])
return
# Delete clone temp snapshot. see create_cloned_volume()
if 'origin' in lun2del and 'id' in volume:
if lun2del['nodestroy']:
self.zfssa.set_lun_props(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
nodestroy=False)
tmpsnap = 'tmp-snapshot-%s' % volume['id']
if lun2del['origin']['snapshot'] == tmpsnap:
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
lun2del['origin']['share'],
lun2del['origin']['snapshot'])
return
self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
project=lcfg.zfssa_project,
lun=volume['name'])
def create_snapshot(self, snapshot):
"""Creates a snapshot of a volume.
Snapshot name: snapshot['name']
Volume name: snapshot['volume_name']
"""
LOG.debug('zfssa.create_snapshot: snapshot=%s', snapshot['name'])
lcfg = self.configuration
self.zfssa.create_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug('zfssa.delete_snapshot: snapshot=%s', snapshot['name'])
lcfg = self.configuration
has_clones = self.zfssa.has_clones(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
if has_clones:
LOG.error(_LE('Snapshot %s: has clones'), snapshot['name'])
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot - clone a snapshot."""
LOG.debug('zfssa.create_volume_from_snapshot: volume=%s',
volume['name'])
LOG.debug('zfssa.create_volume_from_snapshot: snapshot=%s',
snapshot['name'])
if not self._verify_clone_size(snapshot, volume['size'] * units.Gi):
            exception_msg = (_('Error verifying clone size on '
                               'Volume clone: %(clone)s '
                               'Size: %(size)d on '
                               'Snapshot: %(snapshot)s')
% {'clone': volume['name'],
'size': volume['size'],
'snapshot': snapshot['name']})
LOG.error(exception_msg)
raise exception.InvalidInput(reason=exception_msg)
lcfg = self.configuration
self.zfssa.clone_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'],
volume['name'])
def _update_volume_status(self):
"""Retrieve status info from volume group."""
LOG.debug("Updating volume status")
self._stats = None
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'Oracle'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
lcfg = self.configuration
(avail, total) = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
if avail is None or total is None:
return
host = lcfg.san_ip
pool = lcfg.zfssa_pool
project = lcfg.zfssa_project
auth_str = base64.encodestring('%s:%s' %
(lcfg.san_login,
lcfg.san_password))[:-1]
zfssa_tgt_group = lcfg.zfssa_target_group
repl_ip = lcfg.zfssa_replication_ip
data['location_info'] = "%s:%s:%s:%s:%s:%s" % (host, auth_str, pool,
project,
zfssa_tgt_group,
repl_ip)
data['total_capacity_gb'] = int(total) / units.Gi
data['free_capacity_gb'] = int(avail) / units.Gi
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
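    # Example (made-up values) of the location_info string built above and
    # later parsed by migrate_volume():
    #     '10.0.0.1:YWRtaW46c2VjcmV0:pool1:project1:tgt-grp:10.0.0.2'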
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_status()
return self._stats
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def extend_volume(self, volume, new_size):
"""Driver entry point to extent volume size."""
LOG.debug('extend_volume: volume name: %s', volume['name'])
lcfg = self.configuration
self.zfssa.set_lun_props(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
volsize=new_size * units.Gi)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
zfssa_snapshot = {'volume_name': src_vref['name'],
'name': 'tmp-snapshot-%s' % volume['id']}
self.create_snapshot(zfssa_snapshot)
try:
self.create_volume_from_snapshot(volume, zfssa_snapshot)
except exception.VolumeBackendAPIException:
LOG.error(_LE('Clone Volume:'
'%(volume)s failed from source volume:'
'%(src_vref)s'),
{'volume': volume['name'],
'src_vref': src_vref['name']})
# Cleanup snapshot
self.delete_snapshot(zfssa_snapshot)
def local_path(self, volume):
"""Not implemented."""
pass
def backup_volume(self, context, backup, backup_service):
"""Not implemented."""
pass
def restore_backup(self, context, backup, volume, backup_service):
"""Not implemented."""
pass
def _verify_clone_size(self, snapshot, size):
"""Check whether the clone size is the same as the parent volume."""
lcfg = self.configuration
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'])
return lun['size'] == size
def initialize_connection(self, volume, connector):
lcfg = self.configuration
init_groups = self.zfssa.get_initiator_initiatorgroup(
connector['initiator'])
for initiator_group in init_groups:
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
initiator_group)
iscsi_properties = {}
provider = self._get_provider_info(volume)
(target_portal, iqn, lun) = provider['provider_location'].split()
iscsi_properties['target_discovered'] = False
iscsi_properties['target_portal'] = target_portal
iscsi_properties['target_iqn'] = iqn
iscsi_properties['target_lun'] = lun
iscsi_properties['volume_id'] = volume['id']
if 'provider_auth' in provider:
(auth_method, auth_username, auth_password) = provider[
'provider_auth'].split()
iscsi_properties['auth_method'] = auth_method
iscsi_properties['auth_username'] = auth_username
iscsi_properties['auth_password'] = auth_password
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to terminate a connection for a volume."""
LOG.debug('terminate_connection: volume name: %s.', volume['name'])
lcfg = self.configuration
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
'')
def _get_voltype_specs(self, volume):
"""Get specs suitable for volume creation."""
vtype = volume.get('volume_type_id', None)
extra_specs = None
if vtype:
extra_specs = volume_types.get_volume_type_extra_specs(vtype)
return self._get_specs(extra_specs)
def _get_specs(self, xspecs):
"""Return a dict with extra specs and/or config values."""
result = {}
for spc in ZFSSA_LUN_SPECS:
val = None
prop = spc.split(':')[1]
cfg = 'zfssa_lun_' + prop
if xspecs:
val = xspecs.pop(spc, None)
if val is None:
val = self.configuration.safe_get(cfg)
if val is not None and val != '':
result.update({prop: val})
return result
def migrate_volume(self, ctxt, volume, host):
LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, '
'host: %(host)s, status=%(status)s.',
{'id': volume['id'],
'host': host,
'status': volume['status']})
lcfg = self.configuration
default_ret = (False, None)
if volume['status'] != "available":
LOG.debug('Only available volumes can be migrated using backend '
'assisted migration. Defaulting to generic migration.')
return default_ret
if (host['capabilities']['vendor_name'] != 'Oracle' or
host['capabilities']['storage_protocol'] != self.protocol):
LOG.debug('Source and destination drivers need to be Oracle iSCSI '
'to use backend assisted migration. Defaulting to '
'generic migration.')
return default_ret
if 'location_info' not in host['capabilities']:
LOG.debug('Could not find location_info in capabilities reported '
'by the destination driver. Defaulting to generic '
'migration.')
return default_ret
loc_info = host['capabilities']['location_info']
try:
(tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup,
tgt_repl_ip) = loc_info.split(':')
except ValueError:
LOG.error(_LE("Location info needed for backend enabled volume "
"migration not in correct format: %s. Continuing "
"with generic volume migration."), loc_info)
return default_ret
if tgt_repl_ip == '':
msg = _LE("zfssa_replication_ip not set in cinder.conf. "
"zfssa_replication_ip is needed for backend enabled "
"volume migration. Continuing with generic volume "
"migration.")
LOG.error(msg)
return default_ret
src_pool = lcfg.zfssa_pool
src_project = lcfg.zfssa_project
try:
LOG.info(_LI('Connecting to target host: %s for backend enabled '
'migration.'), tgt_host)
self.tgt_zfssa.set_host(tgt_host)
self.tgt_zfssa.login(auth_str)
# Verify that the replication service is online
try:
self.zfssa.verify_service('replication')
self.tgt_zfssa.verify_service('replication')
except exception.VolumeBackendAPIException:
return default_ret
            # ensure that a target group with the same name also exists on the
            # target system; if not, fall back to generic migration.
lun = self.zfssa.get_lun(src_pool, src_project, volume['name'])
if lun['targetgroup'] != tgt_tgtgroup:
return default_ret
tgt_asn = self.tgt_zfssa.get_asn()
src_asn = self.zfssa.get_asn()
# verify on the source system that the destination has been
# registered as a replication target
tgts = self.zfssa.get_replication_targets()
targets = []
for target in tgts['targets']:
if target['asn'] == tgt_asn:
targets.append(target)
if targets == []:
LOG.debug('Target host: %(host)s for volume migration '
'not configured as a replication target '
'for volume: %(vol)s.',
{'host': tgt_repl_ip,
'vol': volume['name']})
return default_ret
# Multiple ips from the same appliance may be configured
# as different targets
for target in targets:
if target['address'] == tgt_repl_ip + ':216':
break
if target['address'] != tgt_repl_ip + ':216':
LOG.debug('Target with replication ip: %s not configured on '
'the source appliance for backend enabled volume '
'migration. Proceeding with default migration.',
tgt_repl_ip)
return default_ret
flow = lf.Flow('zfssa_volume_migration').add(
MigrateVolumeInit(),
MigrateVolumeCreateAction(provides='action_id'),
MigrateVolumeSendReplUpdate(),
MigrateVolumeSeverRepl(),
MigrateVolumeMoveVol(),
MigrateVolumeCleanUp()
)
taskflow.engines.run(flow,
store={'driver': self,
'tgt_zfssa': self.tgt_zfssa,
'tgt_pool': tgt_pool,
'tgt_project': tgt_project,
'volume': volume, 'tgt_asn': tgt_asn,
'src_zfssa': self.zfssa,
'src_asn': src_asn,
'src_pool': src_pool,
'src_project': src_project,
'target': target})
            return (True, None)
except Exception:
LOG.error(_LE("Error migrating volume: %s"), volume['name'])
raise
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
lcfg = self.configuration
original_name = CONF.volume_name_template % volume['id']
current_name = CONF.volume_name_template % new_volume['id']
LOG.debug('Renaming migrated volume: %(cur)s to %(org)s',
{'cur': current_name,
'org': original_name})
self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project,
current_name, name=original_name)
return {'_name_id': None}
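# Illustrative note (not part of the original driver): the 'location_info'
# capability published by _update_volume_status() above is a plain
# colon-separated string that migrate_volume() splits back apart on the
# destination side:
#
#   host, auth_str, pool, project, tgt_group, repl_ip = loc_info.split(':')
#
# This is safe because auth_str is base64 of 'login:password' and the
# base64 alphabet contains no ':' characters.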
class MigrateVolumeInit(task.Task):
def execute(self, src_zfssa, volume, src_pool, src_project):
LOG.debug('Setting inherit flag on source backend to False.')
src_zfssa.edit_inherit_replication_flag(src_pool, src_project,
volume['name'], set=False)
def revert(self, src_zfssa, volume, src_pool, src_project, **kwargs):
LOG.debug('Rollback: Setting inherit flag on source appliance to '
'True.')
src_zfssa.edit_inherit_replication_flag(src_pool, src_project,
volume['name'], set=True)
class MigrateVolumeCreateAction(task.Task):
def execute(self, src_zfssa, volume, src_pool, src_project, target,
tgt_pool):
LOG.debug('Creating replication action on source appliance.')
action_id = src_zfssa.create_replication_action(src_pool,
src_project,
target['label'],
tgt_pool,
volume['name'])
self._action_id = action_id
return action_id
def revert(self, src_zfssa, **kwargs):
if hasattr(self, '_action_id'):
LOG.debug('Rollback: deleting replication action on source '
'appliance.')
src_zfssa.delete_replication_action(self._action_id)
class MigrateVolumeSendReplUpdate(task.Task):
def execute(self, src_zfssa, action_id):
LOG.debug('Sending replication update from source appliance.')
src_zfssa.send_repl_update(action_id)
LOG.debug('Deleting replication action on source appliance.')
src_zfssa.delete_replication_action(action_id)
self._action_deleted = True
class MigrateVolumeSeverRepl(task.Task):
def execute(self, tgt_zfssa, src_asn, action_id, driver):
source = tgt_zfssa.get_replication_source(src_asn)
if not source:
            err = (_('Source with host ip/name: %s not found on the '
                     'target appliance for backend enabled volume '
                     'migration, proceeding with default migration.') %
                   driver.configuration.san_ip)
LOG.error(err)
raise exception.VolumeBackendAPIException(data=err)
LOG.debug('Severing replication package on destination appliance.')
tgt_zfssa.sever_replication(action_id, source['name'],
project=action_id)
class MigrateVolumeMoveVol(task.Task):
def execute(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume):
LOG.debug('Moving LUN to destination project on destination '
'appliance.')
tgt_zfssa.move_volume(tgt_pool, action_id, volume['name'], tgt_project)
LOG.debug('Deleting temporary project on destination appliance.')
tgt_zfssa.delete_project(tgt_pool, action_id)
self._project_deleted = True
def revert(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume,
**kwargs):
if not hasattr(self, '_project_deleted'):
LOG.debug('Rollback: deleting temporary project on destination '
'appliance.')
tgt_zfssa.delete_project(tgt_pool, action_id)
class MigrateVolumeCleanUp(task.Task):
def execute(self, driver, volume, tgt_zfssa):
LOG.debug('Finally, delete source volume on source appliance.')
driver.delete_volume(volume)
tgt_zfssa.logout()
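# Illustrative sketch (not part of the original driver): the Migrate* tasks
# above rely on taskflow's linear-flow semantics -- when any task raises,
# the engine calls revert() on every task that already executed, in reverse
# order. A minimal standalone example of the same pattern, with hypothetical
# task names:
#
#   import taskflow.engines
#   from taskflow import task
#   from taskflow.patterns import linear_flow as lf
#
#   class Allocate(task.Task):
#       def execute(self):
#           print('allocate')
#       def revert(self, **kwargs):
#           print('rollback allocate')
#
#   class Explode(task.Task):
#       def execute(self):
#           raise RuntimeError('boom')  # triggers Allocate.revert()
#
#   taskflow.engines.run(lf.Flow('demo').add(Allocate(), Explode()))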
|
|
# Created by Pearu Peterson, September 2002
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less,
assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.fft._pocketfft import (ifft, fft, fftn, ifftn,
rfft, irfft, rfftn, irfftn, fft2)
from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
swapaxes, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# "large" composite numbers supported by FFT._PYPOCKETFFT
LARGE_COMPOSITE_SIZES = [
2**13,
2**5 * 3**5,
2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
2,
2*3*5,
2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
2011
]
SMALL_PRIME_SIZES = [
29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = "size: %s rdt: %s" % (size, rdt)
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
return rand(*size)
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)/n
return y
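# Illustrative sketch (not part of the original test-suite): the O(n**2)
# reference transforms above can be sanity-checked against numpy's FFT.
# `_demo_direct_dft_check` is a hypothetical helper added for clarity and is
# not collected by pytest.
def _demo_direct_dft_check(n=8):
    x = np.random.rand(n) + 1j*np.random.rand(n)
    assert np.allclose(direct_dft(x), np.fft.fft(x))
    assert np.allclose(direct_idft(x), np.fft.ifft(x))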
def direct_dftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = fft(x, axis=axis)
return x
def direct_idftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = ifft(x, axis=axis)
return x
def direct_rdft(x):
x = asarray(x)
n = len(x)
w = -arange(n)*(2j*pi/n)
y = zeros(n//2+1, dtype=cdouble)
for i in range(n//2+1):
y[i] = dot(exp(i*w), x)
return y
def direct_irdft(x, n):
x = asarray(x)
x1 = zeros(n, dtype=cdouble)
for i in range(n//2+1):
x1[i] = x[i]
if i > 0 and 2*i < n:
x1[n-i] = np.conj(x[i])
return direct_idft(x1).real
def direct_rdftn(x):
return fftn(rfft(x), axes=range(x.ndim - 1))
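# Illustrative sketch (not part of the original test-suite): direct_rdftn
# above equals the library's rfftn, since rfft runs along the last axis and
# a full fft is then applied over each remaining axis. Hypothetical helper,
# not collected by pytest.
def _demo_direct_rdftn_check(shape=(4, 6)):
    x = np.random.rand(*shape)
    assert np.allclose(direct_rdftn(x), rfftn(x))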
class _TestFFTBase(object):
def setup_method(self):
self.cdt = None
self.rdt = None
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
y = fft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_dft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
assert_array_almost_equal(fft(x),direct_dft(x))
def test_n_argument_real(self):
x1 = np.array([1,2,3,4], dtype=self.rdt)
x2 = np.array([1,2,3,4], dtype=self.rdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def _test_n_argument_complex(self):
x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y = fft(x.astype(complex))
y2 = numpy.fft.fft(x)
assert_array_almost_equal(y,y2)
y = fft(x)
assert_array_almost_equal(y,y2)
def test_invalid_sizes(self):
assert_raises(ValueError, fft, [])
assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
class TestLongDoubleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.longcomplex
self.rdt = np.longdouble
class TestDoubleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class TestFloat16FFT(object):
def test_1_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft(x1, n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (4, ))
assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
def test_n_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
x2 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft([x1, x2], n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (2, 4))
assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
class _TestIFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
y = ifft(x)
y1 = direct_idft(x)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_definition_real(self):
x = np.array([1,2,3,4,1,2,3,4], self.rdt)
y = ifft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_idft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4,5], dtype=self.rdt)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y = ifft(x.astype(self.cdt))
y2 = numpy.fft.ifft(x)
assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
y = ifft(x)
assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
def test_random_complex(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.cdt)
x = random([size]).astype(self.cdt) + 1j*x
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
x = (x + 1j*np.random.rand(size)).astype(self.cdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, ifft, [])
assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
@pytest.mark.skipif(np.longdouble is np.float64,
reason="Long double is aliased to double")
class TestLongDoubleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.longcomplex
self.rdt = np.longdouble
self.rtol = 1e-10
self.atol = 1e-10
class TestDoubleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.rtol = 1e-10
self.atol = 1e-10
class TestSingleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
self.rtol = 1e-5
self.atol = 1e-4
class _TestRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
x = np.array(t, dtype=self.rdt)
y = rfft(x)
y1 = direct_rdft(x)
assert_array_almost_equal(y,y1)
assert_equal(y.dtype, self.cdt)
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y1 = np.fft.rfft(x)
y = rfft(x)
assert_array_almost_equal(y,y1)
def test_invalid_sizes(self):
assert_raises(ValueError, rfft, [])
assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
def test_complex_input(self):
x = np.zeros(10, dtype=self.cdt)
with assert_raises(TypeError, match="x must be a real sequence"):
rfft(x)
# See gh-5790
class MockSeries(object):
def __init__(self, data):
self.data = np.asarray(data)
def __getattr__(self, item):
try:
return getattr(self.data, item)
except AttributeError:
raise AttributeError(("'MockSeries' object "
"has no attribute '{attr}'".
format(attr=item)))
def test_non_ndarray_with_dtype(self):
x = np.array([1., 2., 3., 4., 5.])
xs = _TestRFFTBase.MockSeries(x)
expected = [1, 2, 3, 4, 5]
rfft(xs)
# Data should not have been overwritten
assert_equal(x, expected)
assert_equal(xs.data, expected)
@pytest.mark.skipif(np.longfloat is np.float64,
reason="Long double is aliased to double")
class TestRFFTLongDouble(_TestRFFTBase):
def setup_method(self):
self.cdt = np.longcomplex
self.rdt = np.longfloat
class TestRFFTDouble(_TestRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestIRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x1 = [1,2+3j,4+1j,1+2j,3+4j]
x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
x1 = x1_1[:5]
x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
x2 = x2_1[:5]
def _test(x, xr):
y = irfft(np.array(x, dtype=self.cdt), n=len(xr))
y1 = direct_irdft(x, len(xr))
assert_equal(y.dtype, self.rdt)
assert_array_almost_equal(y,y1, decimal=self.ndec)
assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
_test(x1, x1_1)
_test(x2, x2_1)
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2)
x[0] = 0
if n % 2 == 0:
x[-1] = np.real(x[-1])
y1 = np.fft.irfft(x)
y = irfft(x)
assert_array_almost_equal(y,y1)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = irfft(rfft(x), n=size)
y2 = rfft(irfft(x, n=(size*2-1)))
assert_equal(y1.dtype, self.rdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x, decimal=self.ndec,
err_msg="size=%d" % size)
assert_array_almost_equal(y2, x, decimal=self.ndec,
err_msg="size=%d" % size)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = irfft(rfft(x), len(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = rfft(irfft(x, 2 * len(x) - 1))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, irfft, [])
assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
# self.ndec is bogus; we should have an assert_array_approx_equal for number of
# significant digits
@pytest.mark.skipif(np.longfloat is np.float64,
reason="Long double is aliased to double")
class TestIRFFTLongDouble(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.ndec = 14
class TestIRFFTDouble(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
self.ndec = 5
class Testfft2(object):
def setup_method(self):
np.random.seed(1234)
def test_regression_244(self):
"""FFT returns wrong result with axes parameter."""
# fftn (and hence fft2) used to break when both axes and shape were
# used
x = numpy.ones((4, 4, 2))
y = fft2(x, s=(8, 8), axes=(-3, -2))
y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
assert_array_almost_equal(y, y_r)
def test_invalid_sizes(self):
assert_raises(ValueError, fft2, [[]])
assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float32))
assert_(y.dtype == np.complex64,
msg="double precision output with single precision")
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_size_accuracy_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_size_accuracy_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
def test_definition_float16(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float16))
assert_equal(y.dtype, np.complex64)
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_float16_input_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 5e5)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_float16_input_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(x)
assert_array_almost_equal(y, direct_dftn(x))
x = random((20, 26))
assert_array_almost_equal(fftn(x), direct_dftn(x))
x = random((5, 4, 3, 20))
assert_array_almost_equal(fftn(x), direct_dftn(x))
def test_axes_argument(self):
# plane == ji_plane, x== kji_space
plane1 = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
plane2 = [[10, 11, 12],
[13, 14, 15],
[16, 17, 18]]
plane3 = [[19, 20, 21],
[22, 23, 24],
[25, 26, 27]]
ki_plane1 = [[1, 2, 3],
[10, 11, 12],
[19, 20, 21]]
ki_plane2 = [[4, 5, 6],
[13, 14, 15],
[22, 23, 24]]
ki_plane3 = [[7, 8, 9],
[16, 17, 18],
[25, 26, 27]]
jk_plane1 = [[1, 10, 19],
[4, 13, 22],
[7, 16, 25]]
jk_plane2 = [[2, 11, 20],
[5, 14, 23],
[8, 17, 26]]
jk_plane3 = [[3, 12, 21],
[6, 15, 24],
[9, 18, 27]]
kj_plane1 = [[1, 4, 7],
[10, 13, 16], [19, 22, 25]]
kj_plane2 = [[2, 5, 8],
[11, 14, 17], [20, 23, 26]]
kj_plane3 = [[3, 6, 9],
[12, 15, 18], [21, 24, 27]]
ij_plane1 = [[1, 4, 7],
[2, 5, 8],
[3, 6, 9]]
ij_plane2 = [[10, 13, 16],
[11, 14, 17],
[12, 15, 18]]
ij_plane3 = [[19, 22, 25],
[20, 23, 26],
[21, 24, 27]]
ik_plane1 = [[1, 10, 19],
[2, 11, 20],
[3, 12, 21]]
ik_plane2 = [[4, 13, 22],
[5, 14, 23],
[6, 15, 24]]
ik_plane3 = [[7, 16, 25],
[8, 17, 26],
[9, 18, 27]]
ijk_space = [jk_plane1, jk_plane2, jk_plane3]
ikj_space = [kj_plane1, kj_plane2, kj_plane3]
jik_space = [ik_plane1, ik_plane2, ik_plane3]
jki_space = [ki_plane1, ki_plane2, ki_plane3]
kij_space = [ij_plane1, ij_plane2, ij_plane3]
x = array([plane1, plane2, plane3])
assert_array_almost_equal(fftn(x),
fftn(x, axes=(-3, -2, -1))) # kji_space
assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
y = fftn(x, axes=(2, 1, 0)) # ijk_space
assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
y = fftn(x, axes=(2, 0, 1)) # ikj_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
fftn(ikj_space))
y = fftn(x, axes=(1, 2, 0)) # jik_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
fftn(jik_space))
y = fftn(x, axes=(1, 0, 2)) # jki_space
assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
y = fftn(x, axes=(0, 2, 1)) # kij_space
assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
y = fftn(x, axes=(-2, -1)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(1, 2)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(-3, -2)) # kj_plane
assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
y = fftn(x, axes=(-3, -1)) # ki_plane
assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
y = fftn(x, axes=(-1, -2)) # ij_plane
assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
y = fftn(x, axes=(-1, -3)) # ik_plane
assert_array_almost_equal(fftn(ik_plane1),
swapaxes(y[:, 0, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane2),
swapaxes(y[:, 1, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane3),
swapaxes(y[:, 2, :], -1, -2))
y = fftn(x, axes=(-2, -3)) # jk_plane
assert_array_almost_equal(fftn(jk_plane1),
swapaxes(y[:, :, 0], -1, -2))
assert_array_almost_equal(fftn(jk_plane2),
swapaxes(y[:, :, 1], -1, -2))
assert_array_almost_equal(fftn(jk_plane3),
swapaxes(y[:, :, 2], -1, -2))
y = fftn(x, axes=(-1,)) # i_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
y = fftn(x, axes=(-2,)) # j_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
y = fftn(x, axes=(0,)) # k_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
y = fftn(x, axes=()) # point
assert_array_almost_equal(y, x)
def test_shape_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6]]
large_x1 = [[1, 2, 3, 0],
[4, 5, 6, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
y = fftn(small_x, s=(4, 4))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, s=(3, 4))
assert_array_almost_equal(y, fftn(large_x1[:-1]))
def test_shape_axes_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
large_x1 = array([[1, 2, 3, 0],
[4, 5, 6, 0],
[7, 8, 9, 0],
[0, 0, 0, 0]])
y = fftn(small_x, s=(4, 4), axes=(-2, -1))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, s=(4, 4), axes=(-1, -2))
assert_array_almost_equal(y, swapaxes(
fftn(swapaxes(large_x1, -1, -2)), -1, -2))
def test_shape_axes_argument2(self):
# Change shape of the last axis
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-1,), s=(8,))
assert_array_almost_equal(y, fft(x, axis=-1, n=8))
# Change shape of an arbitrary axis which is not the last one
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-2,), s=(8,))
assert_array_almost_equal(y, fft(x, axis=-2, n=8))
# Change shape of axes: cf #244, where shape and axes were mixed up
x = numpy.random.random((4, 4, 2))
y = fftn(x, axes=(-3, -2), s=(8, 8))
assert_array_almost_equal(y,
numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
def test_shape_argument_more(self):
x = zeros((4, 4, 2))
with assert_raises(ValueError,
match="shape requires more axes than are present"):
fftn(x, s=(8, 8, 2, 1))
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
fftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
fftn([[1, 1], [2, 2]], (4, -3))
def test_no_axes(self):
x = numpy.random.random((2,2,2))
assert_allclose(fftn(x, axes=[]), x, atol=1e-7)
class TestIfftn(object):
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = ifftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
x = random((20, 26))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
x = random((5, 4, 3, 20))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
@pytest.mark.parametrize('maxnlp', [2000, 3500])
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random_complex(self, maxnlp, size):
x = random([size, size]) + 1j*random([size, size])
assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
ifftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
ifftn([[1, 1], [2, 2]], (4, -3))
def test_no_axes(self):
x = numpy.random.random((2,2,2))
assert_allclose(ifftn(x, axes=[]), x, atol=1e-7)
class TestRfftn(object):
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = rfftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp)
x = random((20, 26))
assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
x = random((5, 4, 3, 20))
assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random(self, size):
x = random([size, size])
assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10)
@pytest.mark.parametrize('func', [rfftn, irfftn])
def test_invalid_sizes(self, func):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
func([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
func([[1, 1], [2, 2]], (4, -3))
@pytest.mark.parametrize('func', [rfftn, irfftn])
def test_no_axes(self, func):
with assert_raises(ValueError,
match="at least 1 axis must be transformed"):
func([], axes=[])
def test_complex_input(self):
with assert_raises(TypeError, match="x must be a real sequence"):
rfftn(np.zeros(10, dtype=np.complex64))
class FakeArray(object):
def __init__(self, data):
self._data = data
self.__array_interface__ = data.__array_interface__
class FakeArray2(object):
def __init__(self, data):
self._data = data
def __array__(self):
return self._data
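# Illustrative sketch (not part of the original test-suite): the two wrappers
# above exercise the __array_interface__ and __array__ protocols, which is
# how the FFT routines end up seeing plain ndarrays without a copy.
# Hypothetical helper for clarity, not collected by pytest:
def _demo_fake_array_views():
    data = np.arange(4.0)
    assert np.shares_memory(np.asarray(FakeArray(data)), data)
    assert np.shares_memory(np.asarray(FakeArray2(data)), data)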
# TODO: Is this test actually valuable? The behavior it's testing shouldn't be
# relied upon by users except for overwrite_x = False
class TestOverwrite(object):
"""Check input overwrite behavior of the FFT functions."""
real_dtypes = [np.float32, np.float64, np.longfloat]
dtypes = real_dtypes + [np.complex64, np.complex128, np.longcomplex]
fftsizes = [8, 16, 32]
def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
x2 = x.copy()
for fake in [lambda x: x, FakeArray, FakeArray2]:
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not should_overwrite:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
fftsize, overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and fftsize <= shape[axis])
self._check(data, routine, fftsize, axis,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = (np.longcomplex, np.complex128, np.complex64)
self._check_1d(fft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(ifft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
@pytest.mark.parametrize('dtype', real_dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = self.real_dtypes
self._check_1d(irfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(rfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
def fftshape_iter(shp):
if len(shp) <= 0:
yield ()
else:
for j in (shp[0]//2, shp[0], shp[0]*2):
for rest in fftshape_iter(shp[1:]):
yield (j,) + rest
def part_shape(shape, axes):
if axes is None:
return shape
else:
return tuple(np.take(shape, axes))
def should_overwrite(data, shape, axes):
s = part_shape(data.shape, axes)
return (overwrite_x and
np.prod(shape) <= np.prod(s)
and dtype in overwritable_dtypes)
for fftshape in fftshape_iter(part_shape(shape, axes)):
self._check(data, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite(data, fftshape, axes))
if data.ndim > 1:
# check fortran order
self._check(data.T, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite(
data.T, fftshape, axes))
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), None),
((16,), (0,)),
((16, 2), (0,)),
((2, 16), (1,)),
((8, 16), None),
((8, 16), (0, 1)),
((8, 16, 2), (0, 1)),
((8, 16, 2), (1, 2)),
((8, 16, 2), (0,)),
((8, 16, 2), (1,)),
((8, 16, 2), (2,)),
((8, 16, 2), None),
((8, 16, 2), (0, 1, 2))])
def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
overwritable = (np.longcomplex, np.complex128, np.complex64)
self._check_nd_one(fftn, dtype, shape, axes, overwritable,
overwrite_x)
self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
overwrite_x)
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
rfft, irfft, rfftn, irfftn])
def test_invalid_norm(func):
x = np.arange(10, dtype=float)
with assert_raises(ValueError,
match='Invalid norm value o, should be None or "ortho"'):
func(x, norm='o')
|
|
import logging
from datetime import datetime
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPUnauthorized
from pyramid.security import forget
from pyramid.response import Response
from channelstream import total_messages, started_on, total_unique_messages
from channelstream.user import User, users
from channelstream.connection import Connection, connections
from channelstream.channel import Channel, channels
from channelstream.ext_json import json
log = logging.getLogger(__name__)
class ServerViews(object):
def __init__(self, request):
self.request = request
@view_config(route_name='action', match_param='action=connect',
renderer='json', permission='access')
def connect(self):
"""return the id of connected users - will be secured with password string
for webapp to internally call the server - we combine conn string with user id,
and we tell which channels the user is allowed to subscribe to"""
user_name = self.request.json_body.get('user')
def_status = self.request.registry.server_config['status_codes'][
'online']
user_status = int(self.request.json_body.get('status', def_status))
conn_id = self.request.json_body.get('conn_id')
subscribe_to_channels = self.request.json_body.get('channels')
if user_name is None:
self.request.response.status = 400
return {'error': "No username specified"}
if not subscribe_to_channels:
self.request.response.status = 400
return {'error': "No channels specified"}
        # everything is ok, so add the new connection to the channel and
        # connection lists
        if user_name not in users:
user = User(user_name, def_status)
users[user_name] = user
else:
user = users[user_name]
connection = Connection(user_name, conn_id)
        if connection.id not in connections:
connections[connection.id] = connection
user.add_connection(connection)
for channel_name in subscribe_to_channels:
# user gets assigned to a channel
if channel_name not in channels:
channel = Channel(channel_name)
channels[channel_name] = channel
channels[channel_name].add_connection(connection)
log.info('connecting %s with uuid %s' % (user_name, connection.id))
return {'conn_id': connection.id, 'status': user.status}
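    # Illustrative request payload for the 'connect' action (hedged example,
    # not part of the original module); field names match the json_body
    # reads above:
    #
    #   {"user": "alice",
    #    "conn_id": "optional-client-generated-id",
    #    "status": 1,
    #    "channels": ["pub_chat", "notify"]}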
@view_config(route_name='action', match_param='action=subscribe',
renderer='json', permission='access')
def subscribe(self, *args):
""" call this to subscribe specific connection to new channels """
conn_id = self.request.json_body.get('conn_id',
self.request.GET.get('conn_id'))
connection = connections.get(conn_id)
subscribe_to_channels = self.request.json_body.get('channels')
if not connection:
self.request.response.status = 403
return {'error': "Unknown connection"}
if not subscribe_to_channels:
self.request.response.status = 400
return {'error': "No channels specified"}
        # everything is ok, so subscribe the connection to the requested
        # channels and find the owning user
user = users.get(connection.user_name)
subscribed_channels = []
if user:
for channel_name in subscribe_to_channels:
if channel_name not in channels:
channel = Channel(channel_name)
channels[channel_name] = channel
channels[channel_name].add_connection(connection)
for channel in channels.itervalues():
if user.user_name in channel.connections:
subscribed_channels.append(channel.name)
return subscribed_channels
@view_config(route_name='action', match_param='action=listen',
request_method="OPTIONS", renderer='string')
def handle_CORS(self):
self.request.response.headers.add('Access-Control-Allow-Origin', '*')
self.request.response.headers.add('XDomainRequestAllowed', '1')
self.request.response.headers.add('Access-Control-Allow-Methods',
'GET, POST, OPTIONS, PUT')
self.request.response.headers.add('Access-Control-Allow-Headers',
'Content-Type, Depth, User-Agent, '
'X-File-Size, X-Requested-With, '
'If-Modified-Since, X-File-Name, '
'Cache-Control, Pragma, Origin, '
'Connection, Referer, Cookie')
self.request.response.headers.add('Access-Control-Max-Age', '86400')
# self.request.response.headers.add('Access-Control-Allow-Credentials', 'true')
return {}
@view_config(route_name='action', match_param='action=user_status',
renderer='json', permission='access')
def user_status(self):
""" set the status of specific user """
user_name = self.request.json_body.get('user')
def_status = self.request.registry.server_config['status_codes'][
'online']
user_status = int(self.request.json_body.get('status', def_status))
if not user_name:
self.request.response.status = 400
return {'error': "No username specified"}
user_inst = users.get(user_name)
if user_inst:
user_inst.status = user_status
# mark active
user_inst.last_active = datetime.utcnow()
return {}
@view_config(route_name='action', match_param='action=message',
renderer='json', permission='access')
def message(self):
msg_list = self.request.json_body
for msg in msg_list:
if msg.get('timestamp'):
# if present lets use timestamp provided in the message
if '.' in msg['timestamp']:
timestmp = datetime.strptime(msg['timestamp'],
'%Y-%m-%dT%H:%M:%S.%f')
else:
timestmp = datetime.strptime(msg['timestamp'],
'%Y-%m-%dT%H:%M:%S')
else:
timestmp = datetime.utcnow()
message = {'user': msg.get('user'),
'message': msg['message'],
'type': 'message',
'timestamp': timestmp
}
pm_users = msg.get('pm_users', [])
total_sent = 0
global total_unique_messages
total_unique_messages += 1
if msg.get('channel'):
channel_inst = channels.get(msg['channel'])
if channel_inst:
total_sent += channel_inst.add_message(message,
pm_users=pm_users
)
elif pm_users:
                # private message: notify each recipient user directly
for user_name in pm_users:
user_inst = users.get(user_name)
if user_inst:
total_sent += user_inst.add_message(message)
global total_messages
total_messages += total_sent
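    # Illustrative request payload for the 'message' action (hedged example,
    # not part of the original module): a list of messages, each routed to a
    # channel and/or directly to 'pm_users'; 'timestamp' is optional ISO
    # format with or without microseconds:
    #
    #   [{"user": "alice",
    #     "message": "hello",
    #     "channel": "pub_chat",
    #     "pm_users": ["bob"],
    #     "timestamp": "2014-05-01T12:00:00.000000"}]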
@view_config(route_name='action', match_param='action=channel_config',
renderer='json', permission='access')
def channel_config(self):
""" call this to subscribe specific connection to new channels """
channel_data = self.request.json_body
if not channel_data:
self.request.response.status = 400
return {'error': "No channels specified"}
json_data = []
for channel_name, config in channel_data:
            if channel_name not in channels:
channel = Channel(channel_name)
channels[channel_name] = channel
channel_inst = channels[channel_name]
for k, v in config.iteritems():
setattr(channel_inst, k, v)
json_data.append({'name': channel_inst.name,
'long_name': channel_inst.long_name,
'presence': channel_inst.presence,
'salvagable': channel_inst.salvagable,
'store_history': channel_inst.store_history,
'history_size': channel_inst.history_size
})
return json_data
@view_config(
context='channelstream.wsgi_views.wsgi_security:RequestBasicChannenge')
def admin_challenge(self):
response = HTTPUnauthorized()
response.headers.update(forget(self.request))
return response
@view_config(route_name='admin',
renderer='templates/admin.jinja2', permission='access')
def admin(self):
uptime = datetime.utcnow() - started_on
        remembered_user_count = len(users)
unique_user_count = len(
[user for user in users.itervalues() if
user.connections])
total_connections = sum(
[len(user.connections) for user in users.itervalues()])
return {
"remembered_user_count": remembered_user_count,
"unique_user_count": unique_user_count,
"total_connections": total_connections,
"total_messages": total_messages,
"total_unique_messages": total_unique_messages,
"channels": channels,
"users": users, "uptime": uptime
}
@view_config(route_name='action', match_param='action=info',
renderer='json', permission='access')
def info(self):
start_time = datetime.now()
json_data = {"channels": {}, "unique_users": len(users)}
# select everything for empty list
if not self.request.body or not self.request.json_body.get('channels'):
req_channels = channels.keys()
else:
req_channels = self.request.json_body['channels']
# return requested channel info
for channel_inst in [chan for chan in channels.values() if
chan.name in req_channels]:
json_data["channels"][channel_inst.name] = {}
json_data["channels"][channel_inst.name]['total_users'] = len(
channel_inst.connections)
json_data["channels"][channel_inst.name]['total_connections'] = sum(
[len(conns) for conns in channel_inst.connections.values()])
json_data["channels"][channel_inst.name]['users'] = []
for user_name in channel_inst.connections.keys():
user_inst = users.get(user_name)
udata = {'user': user_inst.user_name,
'status': user_inst.status,
"connections": [conn.id for conn in
channel_inst.connections[user_name]]}
json_data["channels"][channel_inst.name]['users'].append(udata)
json_data["channels"][channel_inst.name][
'last_active'] = channel_inst.last_active
log.info('info time: %s' % (datetime.now() - start_time))
return json_data
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
class TestDiffOp(unittest.TestCase):
def set_args(self):
self.input = np.array([1, 4, 5, 2]).astype('float32')
self.n = 1
self.axis = -1
self.prepend = None
self.append = None
def get_output(self):
if self.prepend is not None and self.append is not None:
self.output = np.diff(
self.input,
n=self.n,
axis=self.axis,
prepend=self.prepend,
append=self.append)
elif self.prepend is not None:
self.output = np.diff(
self.input, n=self.n, axis=self.axis, prepend=self.prepend)
elif self.append is not None:
self.output = np.diff(
self.input, n=self.n, axis=self.axis, append=self.append)
else:
self.output = np.diff(self.input, n=self.n, axis=self.axis)
def setUp(self):
self.set_args()
self.get_output()
self.places = [paddle.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(paddle.CUDAPlace(0))
def test_dygraph(self):
for place in self.places:
paddle.disable_static()
x = paddle.to_tensor(self.input, place=place)
if self.prepend is not None:
self.prepend = paddle.to_tensor(self.prepend, place=place)
if self.append is not None:
self.append = paddle.to_tensor(self.append, place=place)
out = paddle.diff(
x,
n=self.n,
axis=self.axis,
prepend=self.prepend,
append=self.append)
self.assertTrue((out.numpy() == self.output).all(), True)
def test_static(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = paddle.fluid.data(
name="input",
shape=self.input.shape,
dtype=self.input.dtype)
has_pend = False
prepend = None
append = None
if self.prepend is not None:
has_pend = True
prepend = paddle.fluid.data(
name="prepend",
shape=self.prepend.shape,
dtype=self.prepend.dtype)
if self.append is not None:
has_pend = True
append = paddle.fluid.data(
name="append",
shape=self.append.shape,
dtype=self.append.dtype)
exe = fluid.Executor(place)
out = paddle.diff(
x, n=self.n, axis=self.axis, prepend=prepend, append=append)
fetches = exe.run(fluid.default_main_program(),
feed={
"input": self.input,
"prepend": self.prepend,
"append": self.append
},
fetch_list=[out])
self.assertTrue((fetches[0] == self.output).all(), True)
def test_grad(self):
for place in self.places:
x = paddle.to_tensor(self.input, place=place, stop_gradient=False)
if self.prepend is not None:
self.prepend = paddle.to_tensor(self.prepend, place=place)
if self.append is not None:
self.append = paddle.to_tensor(self.append, place=place)
out = paddle.diff(
x,
n=self.n,
axis=self.axis,
prepend=self.prepend,
append=self.append)
            try:
                out.backward()
                x_grad = x.grad
            except Exception as e:
                raise RuntimeError("Check Diff Gradient Failed") from e
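# Illustrative sketch (not part of the original tests): paddle.diff is
# expected to follow numpy.diff semantics, where `prepend`/`append` are
# concatenated along `axis` before first differences are taken.
# Hypothetical helper for clarity, not collected by unittest:
def _demo_numpy_diff_semantics():
    x = np.array([1, 4, 5, 2], dtype='float32')
    assert (np.diff(x) == np.array([3, 1, -3], dtype='float32')).all()
    assert (np.diff(x, prepend=0) ==
            np.array([1, 3, 1, -3], dtype='float32')).all()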
class TestDiffOpAxis(TestDiffOp):
def set_args(self):
self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32')
self.n = 1
self.axis = 0
self.prepend = None
self.append = None
class TestDiffOpNDim(TestDiffOp):
def set_args(self):
self.input = np.random.rand(10, 10).astype('float32')
self.n = 1
self.axis = -1
self.prepend = None
self.append = None
class TestDiffOpBool(TestDiffOp):
def set_args(self):
self.input = np.array([0, 1, 1, 0, 1, 0]).astype('bool')
self.n = 1
self.axis = -1
self.prepend = None
self.append = None
class TestDiffOpPrepend(TestDiffOp):
def set_args(self):
self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32')
self.n = 1
self.axis = -1
self.prepend = np.array([[2, 3, 4], [1, 3, 5]]).astype('float32')
self.append = None
class TestDiffOpPrependAxis(TestDiffOp):
def set_args(self):
self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32')
self.n = 1
self.axis = 0
self.prepend = np.array(
[[0, 2, 3, 4], [1, 3, 5, 7], [2, 5, 8, 0]]).astype('float32')
self.append = None
class TestDiffOpAppend(TestDiffOp):
def set_args(self):
self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32')
self.n = 1
self.axis = -1
self.prepend = None
self.append = np.array([[2, 3, 4], [1, 3, 5]]).astype('float32')
class TestDiffOpAppendAxis(TestDiffOp):
def set_args(self):
self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32')
self.n = 1
self.axis = 0
self.prepend = None
self.append = np.array([[2, 3, 4, 1]]).astype('float32')
class TestDiffOpPreAppend(TestDiffOp):
def set_args(self):
self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32')
self.n = 1
self.axis = -1
self.prepend = np.array([[0, 4], [5, 9]]).astype('float32')
self.append = np.array([[2, 3, 4], [1, 3, 5]]).astype('float32')
class TestDiffOpPreAppendAxis(TestDiffOp):
def set_args(self):
self.input = np.array([[1, 4, 5, 2], [1, 5, 4, 2]]).astype('float32')
self.n = 1
self.axis = 0
self.prepend = np.array([[0, 4, 5, 9], [5, 9, 2, 3]]).astype('float32')
self.append = np.array([[2, 3, 4, 7], [1, 3, 5, 6]]).astype('float32')
if __name__ == '__main__':
unittest.main()
|
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
"""Pulls floodlights from a sheet, checks if impressions have changed significantly and sends an alert email.
For example ( modify floodlight_monitor/test.json to include your account and sheet ):
python floodlight_monitor/run.py floodlight_monitor/test.json -u [user credentials path]
"""
from statistics import quantiles
from datetime import date, timedelta
from typing import Generator
from starthinker.util.cm import report_build, report_file, report_to_rows, report_clean, parse_account
from starthinker.util.sheets import sheets_tab_copy, sheets_read, sheets_url
from starthinker.util.csv import rows_to_type, rows_header_trim
from starthinker.util.email import send_email
from starthinker.util.email_template import EmailTemplate
FLOODLIGHT_DATE = 0
FLOODLIGHT_CONFIG_ID = 1
FLOODLIGHT_GROUP_ID = 2
FLOODLIGHT_ACTIVITY_GROUP = 3
FLOODLIGHT_ACTIVITY_ID = 4
FLOODLIGHT_ACTIVITY = 5
FLOODLIGHT_IMPRESSIONS = 6
FLOODLIGHT_STATUS = 7 # added by the script ( LOW, NORMAL, HIGH )
TRIGGER_ID = 0 # from source
TRIGGER_EMAIL = 1 # from source
TRIGGER_REPORT = 2 # added by this script
def floodlight_report(config, task:dict, floodlight_id: int) -> int:
""" Create a report for a specific floodlight if it does not exist.
Args:
floodlight_id - the floodlight being monitored
Returns:
The id of the created report.
"""
account_id, subaccount_id = parse_account(
config,
task['auth'],
task['account']
)
name = 'Floodlight Monitor %s %s ( StarThinker )' % (
account_id,
floodlight_id
)
if config.verbose:
print('FLOODLIGHT MONITOR REPORT: ', name)
  # create report if it does not exist
report = report_build(
config,
task['auth'],
task['account'],
{ 'kind': 'dfareporting#report',
'type': 'FLOODLIGHT',
'accountId': account_id,
'name': name,
'fileName': name.replace('( ', '').replace(' )', '').replace(' ', '_'),
'format': 'CSV',
'delivery': { 'emailOwner': False },
'floodlightCriteria': {
'dateRange': {
'kind': 'dfareporting#dateRange',
'relativeDateRange': 'LAST_7_DAYS'
},
'dimensions': [
{'kind': 'dfareporting#sortedDimension','name': 'date' },
{ 'kind': 'dfareporting#sortedDimension', 'name': 'floodlightConfigId' },
{ 'kind': 'dfareporting#sortedDimension', 'name': 'activityGroupId' },
{ 'kind': 'dfareporting#sortedDimension', 'name': 'activityGroup' },
{ 'kind': 'dfareporting#sortedDimension', 'name': 'activityId' },
{ 'kind': 'dfareporting#sortedDimension', 'name': 'activity' }
],
'floodlightConfigId': {
'dimensionName': 'floodlightConfigId',
'kind': 'dfareporting#dimensionValue',
'matchType': 'EXACT',
'value': floodlight_id
},
'metricNames': ['floodlightImpressions'],
'reportProperties': {
'includeUnattributedCookieConversions': False,
'includeUnattributedIPConversions': False
}
},
'schedule': {
'active': True,
'every': 1,
'repeats': 'DAILY',
'startDate': str(date.today()),
'expirationDate': str((date.today() + timedelta(days=365))),
},
})
return report['id']
def floodlight_rows(config, task:dict, report_id:int) -> Generator[list[str, str, str, str, str, str, int], None, None]:
""" Monitor a report for completion and return rows
Args:
report_id - the report created earlier for a specific floodlight id.
Returns:
A stream of rows, see FLOODLIGHT_* constants for definitions.
"""
# fetch report file if it exists
filename, report = report_file(
config,
task['auth'],
task['account'],
report_id,
None, # no name
10 # wait up to 10 minutes for report to complete
)
# clean up rows
rows = report_to_rows(report)
rows = report_clean(rows)
rows = rows_header_trim(rows)
rows = rows_to_type(rows, column=6)
return rows
def floodlight_analysis(config, task:dict, rows:Generator[list[str, str, str, str, str, str, int], None, None]) -> list[str, list[str, str, str, str, str, str, int, str]]:
""" Perform outlier analysis and return last row by date with satatus indicator.
Groups all floodlight data by activity, checking for ourliers using.
See: http://www.mathwords.com/o/outlier.htm
Args:
rows - A stream of rows, see FLOODLIGHT_* constants for definitions.
Returns:
A date string for the last date as well as the last row for each activity with status appended (LOW, HIGH, NORMAL).
Possibly None, None if no rows.
"""
outliers_today = []
activities = {}
for row in rows:
activities.setdefault(row[FLOODLIGHT_ACTIVITY_ID], []).append(row)
for activity in activities.values():
data = sorted(activity, key=lambda k: k[FLOODLIGHT_IMPRESSIONS])
quartile_1, quartile_median, quartile_3 = quantiles(map(lambda d:d[FLOODLIGHT_IMPRESSIONS], data), n=4)
quartile_range = quartile_3 - quartile_1
outlier_top = quartile_3 + (1.5 * quartile_range)
outlier_bottom = quartile_1 - (1.5 * quartile_range)
last_day = max(data, key=lambda k:k[FLOODLIGHT_DATE])
if last_day[FLOODLIGHT_IMPRESSIONS] == 0 or last_day[FLOODLIGHT_IMPRESSIONS] < outlier_bottom:
last_day.append('LOW')
elif last_day[FLOODLIGHT_IMPRESSIONS] > outlier_top:
last_day.append('HIGH')
else:
last_day.append('NORMAL')
outliers_today.append((
last_day[FLOODLIGHT_DATE],
last_day[FLOODLIGHT_CONFIG_ID],
last_day[FLOODLIGHT_ACTIVITY_ID],
last_day[FLOODLIGHT_ACTIVITY],
last_day[FLOODLIGHT_IMPRESSIONS],
last_day[FLOODLIGHT_STATUS],
))
if len(outliers_today) > 0:
return outliers_today[0][FLOODLIGHT_DATE], outliers_today
else:
return None, None
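# Illustrative sketch (not part of the original module): the outlier fences
# computed in floodlight_analysis() follow the usual 1.5 * IQR rule.
# Hypothetical helper for clarity:
def _demo_iqr_fences(values=(8, 9, 10, 11, 12, 50)):
  q1, _, q3 = quantiles(values, n=4)
  iqr = q3 - q1
  return (q1 - 1.5 * iqr, q3 + 1.5 * iqr)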
def floodlight_email(config, task:dict, day:str, alerts:dict[str, list[str, str, str, str, int, str]]) -> None:
""" Send an email to each alert group with status of all activities.
The email template will contain all activities for each email address specified in the input sheet.
Args:
day - the latest day that was present in all combined reports, used for title of email.
alerts - Each email in the sheet with a list of activities and statuses.
Returns:
Nothing.
"""
for email, table in alerts.items():
# build email template
t = EmailTemplate()
t.align('center')
t.section(True)
# when floodlight alerts exist
issues = sum(1 for row in table if row[5] != 'NORMAL')
if issues > 0:
subject = '%d Floodlight Alerts For %s' % (issues, day)
else:
subject = 'All Floodlights Normal For %s' % day
t.header(subject)
    t.paragraph('The following floodlights are being monitored. A status of LOW or HIGH indicates impressions have changed significantly for the day. A status of NORMAL means impressions are close to the average for the past 7 days.')
t.table([
{ 'name': 'Date', 'type': 'STRING' },
{ 'name': 'Floodlight', 'type': 'STRING' },
{ 'name': 'Activity Id', 'type': 'STRING' },
{ 'name': 'Activity', 'type': 'STRING' },
{ 'name': 'Impressions', 'type': 'INTEGER' },
{ 'name': 'Status', 'type': 'STRING' },
], table)
t.paragraph('Your monitored floodlights and recipients are listed in the sheet below.')
# either way link to the configuration sheet
t.button(
'Floodlight Monitoring Sheet',
sheets_url(config, task['auth'], task['sheet']['sheet']),
big=True
)
t.section(False)
if config.verbose:
print('FLOODLIGHT MONITOR EMAIL ALERTS', email, len(table))
# send email template
send_email(
config,
task['auth'],
email,
None,
None,
subject,
t.get_text(),
t.get_html()
)
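# Example shape of the 'alerts' argument taken by floodlight_email() above
# (illustrative values, not from the original module): one table per
# recipient email, rows as produced by floodlight_analysis():
#
#   {'analyst@example.com': [
#       ('2020-01-07', '12345', '67890', 'Sign Up', 120, 'NORMAL'),
#   ]}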
def floodlight_monitor(config, task:dict) -> None:
""" The task handler. See module description.
Args:
    Everything is passed using task.
Returns:
Nothing.
"""
if config.verbose:
print('FLOODLIGHT MONITOR')
# make sure tab exists in sheet ( deprecated, use sheet task instead )
if 'template' in task['sheet']:
sheets_tab_copy(
config,
task['auth'],
task['sheet']['template']['sheet'],
task['sheet']['template']['tab'],
task['sheet']['sheet'],
task['sheet']['tab']
)
  # read triggers from sheet
triggers = sheets_read(
config,
task['auth'],
task['sheet']['sheet'],
task['sheet']['tab'],
task['sheet']['range']
)
if config.verbose and len(triggers) == 0:
print('FLOODLIGHT MONITOR: No floodlight ids specified in sheet.')
alerts = {}
day = None
# create reports first in parallel
for trigger in triggers:
trigger.append(floodlight_report(config, task, trigger[TRIGGER_ID]))
# download data from all reports
for trigger in triggers:
# get report rows for each floodlight
rows = floodlight_rows(config, task, trigger[TRIGGER_REPORT])
# calculate outliers
last_day, rows = floodlight_analysis(config, task, rows)
if last_day:
# find last day report ran
day = last_day if day is None else max(day, last_day)
# group alerts by email
alerts.setdefault(trigger[TRIGGER_EMAIL], [])
alerts[trigger[TRIGGER_EMAIL]].extend(rows)
if alerts:
floodlight_email(config, task, day, alerts)
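# For illustration only: a hypothetical task dict covering the keys this
# handler reads; real recipes may carry additional fields such as the
# deprecated 'template' block.
#
#   task = {
#       'auth': 'user',
#       'sheet': {
#           'sheet': 'Floodlight Monitor',
#           'tab': 'Triggers',
#           'range': 'A2:B',
#       },
#   }
#   floodlight_monitor(config, task)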
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SqlVulnerabilityAssessmentBaselineRulesOperations(object):
"""SqlVulnerabilityAssessmentBaselineRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
rule_id, # type: str
workspace_id, # type: str
resource_id, # type: str
body=None, # type: Optional["_models.RuleResultsInput"]
**kwargs # type: Any
):
# type: (...) -> "_models.RuleResults"
"""Creates a Baseline for a rule in a database. Will overwrite any previously existing results.
Creates a Baseline for a rule in a database. Will overwrite any previously existing results.
:param rule_id: The rule Id.
:type rule_id: str
:param workspace_id: The workspace Id.
:type workspace_id: str
:param resource_id: The identifier of the resource.
:type resource_id: str
:param body: The baseline results for this rule.
:type body: ~azure.mgmt.security.models.RuleResultsInput
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RuleResults, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RuleResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'ruleId': self._serialize.url("rule_id", rule_id, 'str'),
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['workspaceId'] = self._serialize.query("workspace_id", workspace_id, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'RuleResultsInput')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RuleResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/sqlVulnerabilityAssessments/default/baselineRules/{ruleId}'} # type: ignore
def get(
self,
rule_id, # type: str
workspace_id, # type: str
resource_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RuleResults"
"""Gets the results for a given rule in the Baseline.
Gets the results for a given rule in the Baseline.
:param rule_id: The rule Id.
:type rule_id: str
:param workspace_id: The workspace Id.
:type workspace_id: str
:param resource_id: The identifier of the resource.
:type resource_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RuleResults, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RuleResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'ruleId': self._serialize.url("rule_id", rule_id, 'str'),
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['workspaceId'] = self._serialize.query("workspace_id", workspace_id, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RuleResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/sqlVulnerabilityAssessments/default/baselineRules/{ruleId}'} # type: ignore
def delete(
self,
rule_id, # type: str
workspace_id, # type: str
resource_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a rule from the Baseline of a given database.
Deletes a rule from the Baseline of a given database.
:param rule_id: The rule Id.
:type rule_id: str
:param workspace_id: The workspace Id.
:type workspace_id: str
:param resource_id: The identifier of the resource.
:type resource_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'ruleId': self._serialize.url("rule_id", rule_id, 'str'),
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['workspaceId'] = self._serialize.query("workspace_id", workspace_id, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/sqlVulnerabilityAssessments/default/baselineRules/{ruleId}'} # type: ignore
def list(
self,
workspace_id, # type: str
resource_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RulesResults"
"""Gets the results for all rules in the Baseline.
Gets the results for all rules in the Baseline.
:param workspace_id: The workspace Id.
:type workspace_id: str
:param resource_id: The identifier of the resource.
:type resource_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RulesResults, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RulesResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RulesResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['workspaceId'] = self._serialize.query("workspace_id", workspace_id, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RulesResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/sqlVulnerabilityAssessments/default/baselineRules'} # type: ignore
def add(
self,
workspace_id, # type: str
resource_id, # type: str
body=None, # type: Optional["_models.RulesResultsInput"]
**kwargs # type: Any
):
# type: (...) -> "_models.RulesResults"
"""Add a list of baseline rules. Will overwrite any previously existing results (for all rules).
Add a list of baseline rules. Will overwrite any previously existing results (for all rules).
:param workspace_id: The workspace Id.
:type workspace_id: str
:param resource_id: The identifier of the resource.
:type resource_id: str
:param body: The baseline rules.
:type body: ~azure.mgmt.security.models.RulesResultsInput
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RulesResults, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RulesResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RulesResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.add.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['workspaceId'] = self._serialize.query("workspace_id", workspace_id, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'RulesResultsInput')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RulesResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
add.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/sqlVulnerabilityAssessments/default/baselineRules'} # type: ignore
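# Illustrative usage sketch: this operation group is normally reached through
# a generated client rather than constructed directly. The attribute name
# below is an assumption about how the client exposes it.
#
#   rules = client.sql_vulnerability_assessment_baseline_rules.list(
#       workspace_id='<workspace id>',
#       resource_id='<full ARM resource id>',
#   )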
|
|
# (C) Datadog, Inc. 2012-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import re
import time
# 3rd party
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
STATS_URL = "/;csv;norefresh"
EVENT_TYPE = SOURCE_TYPE_NAME = 'haproxy'
class Services(object):
BACKEND = 'BACKEND'
FRONTEND = 'FRONTEND'
ALL = (BACKEND, FRONTEND)
ALL_STATUSES = (
'up', 'open', 'down', 'maint', 'nolb'
)
STATUSES_TO_SERVICE_CHECK = {
'UP': AgentCheck.OK,
'DOWN': AgentCheck.CRITICAL,
'no check': AgentCheck.UNKNOWN,
'MAINT': AgentCheck.OK,
}
class HAProxy(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.host_status = defaultdict(lambda: defaultdict(lambda: None))
METRICS = {
"qcur": ("gauge", "queue.current"),
"scur": ("gauge", "session.current"),
"slim": ("gauge", "session.limit"),
"spct": ("gauge", "session.pct"), # Calculated as: (scur/slim)*100
"stot": ("rate", "session.rate"),
"bin": ("rate", "bytes.in_rate"),
"bout": ("rate", "bytes.out_rate"),
"dreq": ("rate", "denied.req_rate"),
"dresp": ("rate", "denied.resp_rate"),
"ereq": ("rate", "errors.req_rate"),
"econ": ("rate", "errors.con_rate"),
"eresp": ("rate", "errors.resp_rate"),
"wretr": ("rate", "warnings.retr_rate"),
"wredis": ("rate", "warnings.redis_rate"),
"req_rate": ("gauge", "requests.rate"), # HA Proxy 1.4 and higher
"hrsp_1xx": ("rate", "response.1xx"), # HA Proxy 1.4 and higher
"hrsp_2xx": ("rate", "response.2xx"), # HA Proxy 1.4 and higher
"hrsp_3xx": ("rate", "response.3xx"), # HA Proxy 1.4 and higher
"hrsp_4xx": ("rate", "response.4xx"), # HA Proxy 1.4 and higher
"hrsp_5xx": ("rate", "response.5xx"), # HA Proxy 1.4 and higher
"hrsp_other": ("rate", "response.other"), # HA Proxy 1.4 and higher
"qtime": ("gauge", "queue.time"), # HA Proxy 1.5 and higher
"ctime": ("gauge", "connect.time"), # HA Proxy 1.5 and higher
"rtime": ("gauge", "response.time"), # HA Proxy 1.5 and higher
"ttime": ("gauge", "session.time"), # HA Proxy 1.5 and higher
}
SERVICE_CHECK_NAME = 'haproxy.backend_up'
def check(self, instance):
url = instance.get('url')
username = instance.get('username')
password = instance.get('password')
collect_aggregates_only = _is_affirmative(
instance.get('collect_aggregates_only', True)
)
collect_status_metrics = _is_affirmative(
instance.get('collect_status_metrics', False)
)
collect_status_metrics_by_host = _is_affirmative(
instance.get('collect_status_metrics_by_host', False)
)
count_status_by_service = _is_affirmative(
instance.get('count_status_by_service', True)
)
tag_service_check_by_host = _is_affirmative(
instance.get('tag_service_check_by_host', False)
)
services_incl_filter = instance.get('services_include', [])
services_excl_filter = instance.get('services_exclude', [])
verify = not _is_affirmative(instance.get('disable_ssl_validation', False))
self.log.debug('Processing HAProxy data for %s' % url)
data = self._fetch_data(url, username, password, verify)
process_events = instance.get('status_check', self.init_config.get('status_check', False))
self._process_data(
data, collect_aggregates_only, process_events,
url=url, collect_status_metrics=collect_status_metrics,
collect_status_metrics_by_host=collect_status_metrics_by_host,
tag_service_check_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service,
)
def _fetch_data(self, url, username, password, verify):
        ''' Hit the given stats URL and return the raw CSV response split into lines '''
# Try to fetch data from the stats URL
auth = (username, password)
url = "%s%s" % (url, STATS_URL)
self.log.debug("HAProxy Fetching haproxy search data from: %s" % url)
r = requests.get(url, auth=auth, headers=headers(self.agentConfig), verify=verify)
r.raise_for_status()
return r.content.splitlines()
def _process_data(self, data, collect_aggregates_only, process_events, url=None,
collect_status_metrics=False, collect_status_metrics_by_host=False,
tag_service_check_by_host=False, services_incl_filter=None,
services_excl_filter=None, count_status_by_service=True):
''' Main data-processing loop. For each piece of useful data, we'll
either save a metric, save an event or both. '''
# Split the first line into an index of fields
# The line looks like:
# "# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,"
fields = [f.strip() for f in data[0][2:].split(',') if f]
self.hosts_statuses = defaultdict(int)
back_or_front = None
# Skip the first line, go backwards to set back_or_front
for line in data[:0:-1]:
if not line.strip():
continue
# Store each line's values in a dictionary
data_dict = self._line_to_dict(fields, line)
if self._is_aggregate(data_dict):
back_or_front = data_dict['svname']
self._update_data_dict(data_dict, back_or_front)
self._update_hosts_statuses_if_needed(
collect_status_metrics, collect_status_metrics_by_host,
data_dict, self.hosts_statuses
)
if self._should_process(data_dict, collect_aggregates_only):
# update status
# Send the list of data to the metric and event callbacks
self._process_metrics(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
if process_events:
self._process_event(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
self._process_service_check(
data_dict, url,
tag_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
if collect_status_metrics:
self._process_status_metric(
self.hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
count_status_by_service=count_status_by_service
)
self._process_backend_hosts_metric(
self.hosts_statuses,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter
)
return data
def _line_to_dict(self, fields, line):
data_dict = {}
for i, val in enumerate(line.split(',')[:]):
if val:
try:
                    # Try converting to a float; on failure, leave the value as a string
val = float(val)
except Exception:
pass
data_dict[fields[i]] = val
return data_dict
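    # A small sketch of what _line_to_dict yields for one stats line, given
    # fields parsed from the "# pxname,svname,..." header row (values below
    # are hypothetical; numeric strings are converted to floats):
    #
    #   fields = ['pxname', 'svname', 'scur']
    #   self._line_to_dict(fields, 'dogweb,i-4562165,42')
    #   # -> {'pxname': 'dogweb', 'svname': 'i-4562165', 'scur': 42.0}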
def _update_data_dict(self, data_dict, back_or_front):
"""
Adds spct if relevant, adds service
"""
data_dict['back_or_front'] = back_or_front
# The percentage of used sessions based on 'scur' and 'slim'
if 'slim' in data_dict and 'scur' in data_dict:
try:
data_dict['spct'] = (data_dict['scur'] / data_dict['slim']) * 100
except (TypeError, ZeroDivisionError):
pass
def _is_aggregate(self, data_dict):
return data_dict['svname'] in Services.ALL
def _update_hosts_statuses_if_needed(self, collect_status_metrics,
collect_status_metrics_by_host,
data_dict, hosts_statuses):
if data_dict['svname'] == Services.BACKEND:
return
if collect_status_metrics and 'status' in data_dict and 'pxname' in data_dict:
if collect_status_metrics_by_host and 'svname' in data_dict:
key = (data_dict['pxname'], data_dict['svname'], data_dict['status'])
else:
key = (data_dict['pxname'], data_dict['status'])
hosts_statuses[key] += 1
def _should_process(self, data_dict, collect_aggregates_only):
"""
if collect_aggregates_only, we process only the aggregates
else we process all except Services.BACKEND
"""
if collect_aggregates_only:
if self._is_aggregate(data_dict):
return True
return False
elif data_dict['svname'] == Services.BACKEND:
return False
return True
def _is_service_excl_filtered(self, service_name, services_incl_filter,
services_excl_filter):
if self._tag_match_patterns(service_name, services_excl_filter):
if self._tag_match_patterns(service_name, services_incl_filter):
return False
return True
return False
def _tag_match_patterns(self, tag, filters):
if not filters:
return False
for rule in filters:
if re.search(rule, tag):
return True
return False
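    # Filter semantics sketch (regex patterns, hypothetical service names):
    # a service is excluded only when it matches an exclude pattern and is
    # not rescued by an include pattern.
    #
    #   self._is_service_excl_filtered('web-prod', ['prod'], ['web-.*'])  # False
    #   self._is_service_excl_filtered('web-prod', [], ['web-.*'])        # True
    #   self._is_service_excl_filtered('db', [], ['web-.*'])              # False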
def _process_backend_hosts_metric(self, hosts_statuses, services_incl_filter=None,
services_excl_filter=None):
agg_statuses = defaultdict(lambda: {'available': 0, 'unavailable': 0})
for host_status, count in hosts_statuses.iteritems():
try:
service, hostname, status = host_status
except Exception:
service, status = host_status
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
status = status.lower()
if 'up' in status:
agg_statuses[service]['available'] += count
elif 'down' in status or 'maint' in status or 'nolb' in status:
agg_statuses[service]['unavailable'] += count
else:
# create the entries for this service anyway
agg_statuses[service]
for service in agg_statuses:
tags = ['service:%s' % service]
self.gauge(
'haproxy.backend_hosts',
agg_statuses[service]['available'],
tags=tags + ['available:true'])
self.gauge(
'haproxy.backend_hosts',
agg_statuses[service]['unavailable'],
tags=tags + ['available:false'])
return agg_statuses
def _process_status_metric(self, hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=None, services_excl_filter=None,
count_status_by_service=True):
agg_statuses = defaultdict(lambda: {'available': 0, 'unavailable': 0})
# use a counter unless we have a unique tag set to gauge
counter = defaultdict(int)
if count_status_by_service and collect_status_metrics_by_host:
# `service` and `backend` tags will exist
counter = None
for host_status, count in hosts_statuses.iteritems():
try:
service, hostname, status = host_status
except Exception:
service, status = host_status
status = status.lower()
tags = []
if count_status_by_service:
tags.append('service:%s' % service)
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
if collect_status_metrics_by_host:
tags.append('backend:%s' % hostname)
self._gauge_all_statuses(
"haproxy.count_per_status",
count, status, tags, counter
)
if 'up' in status or 'open' in status:
agg_statuses[service]['available'] += count
if 'down' in status or 'maint' in status or 'nolb' in status:
agg_statuses[service]['unavailable'] += count
if counter is not None:
# send aggregated counts as gauges
for key, count in counter.iteritems():
metric_name, tags = key[0], key[1]
self.gauge(metric_name, count, tags=tags)
for service in agg_statuses:
for status, count in agg_statuses[service].iteritems():
tags = ['status:%s' % status]
if count_status_by_service:
tags.append('service:%s' % service)
self.gauge("haproxy.count_per_status", count, tags=tags)
def _gauge_all_statuses(self, metric_name, count, status, tags, counter):
if counter is not None:
counter_key = tuple([metric_name, tuple(tags + ['status:%s' % status])])
counter[counter_key] += count
else:
# assume we have enough context, just send a gauge
self.gauge(metric_name, count, tags + ['status:%s' % status])
for state in Services.ALL_STATUSES:
if state != status:
self.gauge(metric_name, 0, tags + ['status:%s' % state.replace(" ", "_")])
def _process_metrics(self, data, url, services_incl_filter=None,
services_excl_filter=None):
"""
Data is a dictionary related to one host
(one line) extracted from the csv.
It should look like:
{'pxname':'dogweb', 'svname':'i-4562165', 'scur':'42', ...}
"""
hostname = data['svname']
service_name = data['pxname']
back_or_front = data['back_or_front']
tags = ["type:%s" % back_or_front, "instance_url:%s" % url]
tags.append("service:%s" % service_name)
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
for key, value in data.items():
if HAProxy.METRICS.get(key):
suffix = HAProxy.METRICS[key][1]
name = "haproxy.%s.%s" % (back_or_front.lower(), suffix)
if HAProxy.METRICS[key][0] == 'rate':
self.rate(name, value, tags=tags)
else:
self.gauge(name, value, tags=tags)
def _process_event(self, data, url, services_incl_filter=None,
services_excl_filter=None):
'''
Main event processing loop. An event will be created for a service
status change.
        Service checks on the server side can be used to provide the same functionality.
'''
hostname = data['svname']
service_name = data['pxname']
key = "%s:%s" % (hostname, service_name)
status = self.host_status[url][key]
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if status is None:
self.host_status[url][key] = data['status']
return
if status != data['status'] and data['status'] in ('UP', 'DOWN'):
# If the status of a host has changed, we trigger an event
try:
lastchg = int(data['lastchg'])
except Exception:
lastchg = 0
# Create the event object
ev = self._create_event(
data['status'], hostname, lastchg, service_name,
data['back_or_front']
)
self.event(ev)
# Store this host status so we can check against it later
self.host_status[url][key] = data['status']
def _create_event(self, status, hostname, lastchg, service_name, back_or_front):
HAProxy_agent = self.hostname.decode('utf-8')
if status == "DOWN":
alert_type = "error"
title = "%s reported %s:%s %s" % (HAProxy_agent, service_name, hostname, status)
else:
if status == "UP":
alert_type = "success"
else:
alert_type = "info"
title = "%s reported %s:%s back and %s" % (HAProxy_agent, service_name, hostname, status)
tags = ["service:%s" % service_name]
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
return {
'timestamp': int(time.time() - lastchg),
'event_type': EVENT_TYPE,
'host': HAProxy_agent,
'msg_title': title,
'alert_type': alert_type,
"source_type_name": SOURCE_TYPE_NAME,
"event_object": hostname,
"tags": tags
}
def _process_service_check(self, data, url, tag_by_host=False,
services_incl_filter=None, services_excl_filter=None):
''' Report a service check, tagged by the service and the backend.
Statuses are defined in `STATUSES_TO_SERVICE_CHECK` mapping.
'''
service_name = data['pxname']
status = data['status']
haproxy_hostname = self.hostname.decode('utf-8')
check_hostname = haproxy_hostname if tag_by_host else ''
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if status in Services.STATUSES_TO_SERVICE_CHECK:
service_check_tags = ["service:%s" % service_name]
hostname = data['svname']
if data['back_or_front'] == Services.BACKEND:
service_check_tags.append('backend:%s' % hostname)
status = Services.STATUSES_TO_SERVICE_CHECK[status]
message = "%s reported %s:%s %s" % (haproxy_hostname, service_name,
hostname, status)
self.service_check(self.SERVICE_CHECK_NAME, status, message=message,
hostname=check_hostname, tags=service_check_tags)
|
|
# Copyright (c) 2009, Daniel Krech All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the Daniel Krech nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Support for automatically downloading Python packages from an URL."""
import codecs
import logging
import os
import shutil
import stat
import sys
import tarfile
import tempfile
import urllib2
import urlparse
import zipfile
_log = logging.getLogger(__name__)
class AutoInstaller(object):
"""Supports automatically installing Python packages from an URL.
Supports uncompressed files, .tar.gz, and .zip formats.
Basic usage:
installer = AutoInstaller()
installer.install(url="http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
url_subpath="pep8-0.5.0/pep8.py")
installer.install(url="http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.4.zip",
url_subpath="mechanize")
"""
def __init__(self, append_to_search_path=False, make_package=True,
target_dir=None, temp_dir=None):
"""Create an AutoInstaller instance, and set up the target directory.
Args:
append_to_search_path: A boolean value of whether to append the
target directory to the sys.path search path.
make_package: A boolean value of whether to make the target
directory a package. This adds an __init__.py file
to the target directory -- allowing packages and
modules within the target directory to be imported
explicitly using dotted module names.
target_dir: The directory path to which packages should be installed.
Defaults to a subdirectory of the folder containing
this module called "autoinstalled".
temp_dir: The directory path to use for any temporary files
generated while downloading, unzipping, and extracting
packages to install. Defaults to a standard temporary
location generated by the tempfile module. This
parameter should normally be used only for development
testing.
"""
if target_dir is None:
this_dir = os.path.dirname(__file__)
target_dir = os.path.join(this_dir, "autoinstalled")
# Ensure that the target directory exists.
self._set_up_target_dir(target_dir, append_to_search_path, make_package)
self._target_dir = target_dir
self._temp_dir = temp_dir
def _write_file(self, path, text, encoding):
with codecs.open(path, "w", encoding) as filehandle:
filehandle.write(text)
def _set_up_target_dir(self, target_dir, append_to_search_path,
make_package):
"""Set up a target directory.
Args:
target_dir: The path to the target directory to set up.
append_to_search_path: A boolean value of whether to append the
target directory to the sys.path search path.
make_package: A boolean value of whether to make the target
directory a package. This adds an __init__.py file
to the target directory -- allowing packages and
modules within the target directory to be imported
explicitly using dotted module names.
"""
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if append_to_search_path:
sys.path.append(target_dir)
if make_package:
self._make_package(target_dir)
def _make_package(self, target_dir):
init_path = os.path.join(target_dir, "__init__.py")
if not os.path.exists(init_path):
text = ("# This file is required for Python to search this "
"directory for modules.\n")
self._write_file(init_path, text, "ascii")
def _create_scratch_directory_inner(self, prefix):
"""Create a scratch directory without exception handling.
Creates a scratch directory inside the AutoInstaller temp
directory self._temp_dir, or inside a platform-dependent temp
directory if self._temp_dir is None. Returns the path to the
created scratch directory.
Raises:
OSError: [Errno 2] if the containing temp directory self._temp_dir
is not None and does not exist.
"""
# The tempfile.mkdtemp() method function requires that the
# directory corresponding to the "dir" parameter already exist
# if it is not None.
scratch_dir = tempfile.mkdtemp(prefix=prefix.replace('/', '.'), dir=self._temp_dir)
return scratch_dir
def _create_scratch_directory(self, target_name):
"""Create a temporary scratch directory, and return its path.
The scratch directory is generated inside the temp directory
of this AutoInstaller instance. This method also creates the
temp directory if it does not already exist.
"""
prefix = target_name.replace(os.sep, "_") + "_"
try:
scratch_dir = self._create_scratch_directory_inner(prefix)
except OSError:
# Handle case of containing temp directory not existing--
# OSError: [Errno 2] No such file or directory:...
temp_dir = self._temp_dir
if temp_dir is None or os.path.exists(temp_dir):
raise
# Else try again after creating the temp directory.
os.makedirs(temp_dir)
scratch_dir = self._create_scratch_directory_inner(prefix)
return scratch_dir
def _url_downloaded_path(self, target_name):
return os.path.join(self._target_dir, ".%s.url" % target_name.replace('/', '_'))
def _is_downloaded(self, target_name, url):
version_path = self._url_downloaded_path(target_name)
if not os.path.exists(version_path):
return False
with codecs.open(version_path, "r", "utf-8") as filehandle:
return filehandle.read().strip() == url.strip()
def _record_url_downloaded(self, target_name, url):
version_path = self._url_downloaded_path(target_name)
self._write_file(version_path, url, "utf-8")
def _extract_targz(self, path, scratch_dir):
# tarfile.extractall() extracts to a path without the trailing ".tar.gz".
target_basename = os.path.basename(path[:-len(".tar.gz")])
target_path = os.path.join(scratch_dir, target_basename)
try:
tar_file = tarfile.open(path)
except tarfile.ReadError, err:
# Append existing Error message to new Error.
message = ("Could not open tar file: %s\n"
" The file probably does not have the correct format.\n"
" --> Inner message: %s"
% (path, err))
raise Exception(message)
try:
tar_file.extractall(target_path)
finally:
tar_file.close()
return target_path
# This is a replacement for ZipFile.extractall(), which is
# available in Python 2.6 but not in earlier versions.
# NOTE: The version in 2.6.1 (which shipped on Snow Leopard) is broken!
def _extract_all(self, zip_file, target_dir):
for name in zip_file.namelist():
path = os.path.join(target_dir, name)
if not os.path.basename(path):
# Then the path ends in a slash, so it is a directory.
os.makedirs(path)
continue
try:
# We open this file w/o encoding, as we're reading/writing
# the raw byte-stream from the zip file.
outfile = open(path, 'wb')
except IOError:
# Not all zip files seem to list the directories explicitly,
# so try again after creating the containing directory.
_log.debug("Got IOError: retrying after creating directory...")
dirname = os.path.dirname(path)
os.makedirs(dirname)
outfile = open(path, 'wb')
try:
outfile.write(zip_file.read(name))
finally:
outfile.close()
def _unzip(self, path, scratch_dir):
# zipfile.extractall() extracts to a path without the trailing ".zip".
target_basename = os.path.basename(path[:-len(".zip")])
target_path = os.path.join(scratch_dir, target_basename)
try:
zip_file = zipfile.ZipFile(path, "r")
except zipfile.BadZipfile, err:
message = ("Could not open zip file: %s\n"
" --> Inner message: %s"
% (path, err))
raise Exception(message)
try:
self._extract_all(zip_file, scratch_dir)
finally:
zip_file.close()
return target_path
def _prepare_package(self, path, scratch_dir):
"""Prepare a package for use, if necessary, and return the new path.
For example, this method unzips zipped files and extracts
tar files.
Args:
path: The path to the downloaded URL contents.
scratch_dir: The scratch directory. Note that the scratch
directory contains the file designated by the
path parameter.
"""
# FIXME: Add other natural extensions.
if path.endswith(".zip"):
new_path = self._unzip(path, scratch_dir)
elif path.endswith(".tar.gz"):
new_path = self._extract_targz(path, scratch_dir)
else:
# No preparation is needed.
new_path = path
return new_path
def _download_to_stream(self, url, stream):
failures = 0
while True:
try:
netstream = urllib2.urlopen(url)
break
except IOError, err:
# Try multiple times
if failures < 5:
_log.warning("Failed to download %s, %s retrying" % (
url, err))
failures += 1
continue
# Append existing Error message to new Error.
message = ('Could not download Python modules from URL "%s".\n'
" Make sure you are connected to the internet.\n"
" You must be connected to the internet when "
"downloading needed modules for the first time.\n"
" --> Inner message: %s"
% (url, err))
raise IOError(message)
code = 200
if hasattr(netstream, "getcode"):
code = netstream.getcode()
if not 200 <= code < 300:
raise ValueError("HTTP Error code %s" % code)
BUFSIZE = 2**13 # 8KB
while True:
data = netstream.read(BUFSIZE)
if not data:
break
stream.write(data)
netstream.close()
def _download(self, url, scratch_dir):
url_path = urlparse.urlsplit(url)[2]
url_path = os.path.normpath(url_path) # Removes trailing slash.
target_filename = os.path.basename(url_path)
target_path = os.path.join(scratch_dir, target_filename)
with open(target_path, "wb") as stream:
self._download_to_stream(url, stream)
return target_path
def _install(self, scratch_dir, package_name, target_path, url, url_subpath, files_to_remove):
"""Install a python package from an URL.
This internal method overwrites the target path if the target
path already exists.
"""
path = self._download(url=url, scratch_dir=scratch_dir)
path = self._prepare_package(path, scratch_dir)
if url_subpath is None:
source_path = path
else:
source_path = os.path.join(path, url_subpath)
for filename in files_to_remove:
path = os.path.join(source_path, filename.replace('/', os.sep))
if os.path.exists(path):
                # Pre-emptively change the permissions to 0777 to try and work around win32 permissions issues.
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
os.remove(path)
if os.path.exists(target_path):
if os.path.isdir(target_path):
shutil.rmtree(target_path, ignore_errors=True)
else:
os.remove(target_path)
# shutil.move() command creates intermediate directories if they do not exist.
shutil.move(source_path, target_path)
# ensure all the new directories are importable.
intermediate_dirs = os.path.dirname(os.path.relpath(target_path, self._target_dir))
parent_dirname = self._target_dir
for dirname in intermediate_dirs.split(os.sep):
parent_dirname = os.path.join(parent_dirname, dirname)
self._make_package(parent_dirname)
self._record_url_downloaded(package_name, url)
def install(self, url, should_refresh=False, target_name=None,
url_subpath=None, files_to_remove=None):
"""Install a python package from an URL.
Args:
url: The URL from which to download the package.
Optional Args:
should_refresh: A boolean value of whether the package should be
downloaded again if the package is already present.
target_name: The name of the folder or file in the autoinstaller
target directory at which the package should be
installed. Defaults to the base name of the
URL sub-path. This parameter must be provided if
the URL sub-path is not specified.
          url_subpath: The relative path of the URL directory that should
                       be installed. Defaults to the entire URL contents.
          files_to_remove: A list of file paths to delete from the package
                           after extraction, relative to the URL sub-path.
                           Defaults to an empty list.
"""
if target_name is None:
if not url_subpath:
raise ValueError('The "target_name" parameter must be '
'provided if the "url_subpath" parameter '
"is not provided.")
# Remove any trailing slashes.
url_subpath = os.path.normpath(url_subpath)
target_name = os.path.basename(url_subpath)
target_path = os.path.join(self._target_dir, target_name.replace('/', os.sep))
if not should_refresh and self._is_downloaded(target_name, url):
return False
files_to_remove = files_to_remove or []
package_name = target_name.replace(os.sep, '.')
_log.info("Auto-installing package: %s" % package_name)
# The scratch directory is where we will download and prepare
# files specific to this install until they are ready to move
# into place.
scratch_dir = self._create_scratch_directory(target_name)
try:
self._install(package_name=package_name,
target_path=target_path,
scratch_dir=scratch_dir,
url=url,
url_subpath=url_subpath,
files_to_remove=files_to_remove)
except Exception, err:
# Append existing Error message to new Error.
message = ("Error auto-installing the %s package to:\n"
' "%s"\n'
" --> Inner message: %s"
% (target_name, target_path, err))
raise Exception(message)
finally:
shutil.rmtree(scratch_dir, ignore_errors=True)
_log.debug('Auto-installed %s to:' % url)
_log.debug(' "%s"' % target_path)
return True
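# A short usage sketch (hypothetical URL), exercising the target_name branch
# of install() for packages downloaded without a url_subpath:
#
#   installer = AutoInstaller()
#   installer.install(url="http://example.com/packages/foo-1.0.tar.gz",
#                     target_name="foo")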
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import logging
import numpy as np
import time
from pymatgen.core.structure import Structure
from pymatgen.core.sites import PeriodicSite
from monty.json import MSONable
from scipy.spatial import Voronoi
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import my_solid_angle
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import get_lower_and_upper_f
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import rectangle_surface_intersection
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.utils.math_utils import normal_cdf_step
"""
This module contains the object used to describe the possible bonded atoms based on a Voronoi analysis.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
def from_bson_voronoi_list(bson_nb_voro_list, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list (composed of
vlist and bson_nb_voro_list).
:param vlist: List of voronoi objects
:param bson_nb_voro_list: List of periodic sites involved in the Voronoi
:return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
allowed in the BSON format)
"""
voronoi_list = [None] * len(bson_nb_voro_list)
for isite, voro in enumerate(bson_nb_voro_list):
if voro is None or voro == 'None':
continue
voronoi_list[isite] = []
for psd, dd in voro:
struct_site = structure[dd['index']]
periodic_site = PeriodicSite(struct_site._species, struct_site.frac_coords + psd[1],
struct_site._lattice, properties=struct_site._properties)
voronoi_list[isite].append((periodic_site, dd))
return voronoi_list
def from_bson_voronoi_list2(bson_nb_voro_list2, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list (composed of
vlist and bson_nb_voro_list).
:param vlist: List of voronoi objects
:param bson_nb_voro_list: List of periodic sites involved in the Voronoi
:return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
allowed in the BSON format)
"""
voronoi_list = [None] * len(bson_nb_voro_list2)
for isite, voro in enumerate(bson_nb_voro_list2):
if voro is None or voro == 'None':
continue
voronoi_list[isite] = []
for psd, dd in voro:
struct_site = structure[dd['index']]
periodic_site = PeriodicSite(struct_site._species, struct_site.frac_coords + psd[1],
struct_site._lattice, properties=struct_site._properties)
dd['site'] = periodic_site
voronoi_list[isite].append(dd)
return voronoi_list
class DetailedVoronoiContainer(MSONable):
"""
Class used to store the full Voronoi of a given structure.
"""
AC = AdditionalConditions()
default_voronoi_cutoff = 10.0
def __init__(self, structure=None, voronoi_list=None, voronoi_list2=None,
# neighbors_lists=None,
voronoi_cutoff=default_voronoi_cutoff, isites=None,
normalized_distance_tolerance=1e-5, normalized_angle_tolerance=1e-3,
additional_conditions=None, valences=None,
maximum_distance_factor=None, minimum_angle_factor=None):
"""
Constructor for the VoronoiContainer object. Either a structure is given, in which case the Voronoi is
computed, or the different components of the VoronoiContainer are given (used in the from_dict method)
:param structure: Structure for which the Voronoi is computed
:param voronoi_list: List of voronoi polyhedrons for each site
        :param voronoi_list2: List of precomputed Voronoi neighbor dicts for each site (used by from_dict)
:param voronoi_cutoff: cutoff used for the voronoi
:param isites: indices of sites for which the Voronoi has to be computed
:raise: RuntimeError if the Voronoi cannot be constructed
"""
self.normalized_distance_tolerance = normalized_distance_tolerance
self.normalized_angle_tolerance = normalized_angle_tolerance
if additional_conditions is None:
self.additional_conditions = [self.AC.NONE, self.AC.ONLY_ACB]
else:
self.additional_conditions = additional_conditions
self.valences = valences
self.maximum_distance_factor = maximum_distance_factor
self.minimum_angle_factor = minimum_angle_factor
if isites is None:
indices = list(range(len(structure)))
else:
indices = isites
self.structure = structure
logging.info('Setting Voronoi list')
if voronoi_list2 is not None:
self.voronoi_list2 = voronoi_list2
else:
self.setup_voronoi_list(indices=indices, voronoi_cutoff=voronoi_cutoff)
logging.info('Setting neighbors distances and angles')
t1 = time.clock()
self.setup_neighbors_distances_and_angles(indices=indices)
t2 = time.clock()
logging.info('Neighbors distances and angles set up in {:.2f} seconds'.format(t2-t1))
def setup_voronoi_list(self, indices, voronoi_cutoff):
"""
        Sets up the Voronoi list of neighbours by calling qhull.
:param indices: indices of the sites for which the Voronoi is needed
:param voronoi_cutoff: Voronoi cutoff for the search of neighbours
:raise RuntimeError: If an infinite vertex is found in the voronoi construction
"""
self.voronoi_list2 = [None] * len(self.structure)
logging.info('Getting all neighbors in structure')
struct_neighbors = self.structure.get_all_neighbors(voronoi_cutoff, include_index=True)
t1 = time.clock()
logging.info('Setting up Voronoi list :')
for jj, isite in enumerate(indices):
logging.info(' - Voronoi analysis for site #{:d} ({:d}/{:d})'.format(isite, jj+1, len(indices)))
site = self.structure[isite]
neighbors1 = [(site, 0.0, isite)]
neighbors1.extend(struct_neighbors[isite])
distances = [i[1] for i in sorted(neighbors1, key=lambda s: s[1])]
neighbors = [i[0] for i in sorted(neighbors1, key=lambda s: s[1])]
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(points=qvoronoi_input, qhull_options="o Fv")
all_vertices = voro.vertices
results2 = []
maxangle = 0.0
mindist = 10000.0
for iridge, ridge_points in enumerate(voro.ridge_points):
if 0 in ridge_points:
ridge_vertices_indices = voro.ridge_vertices[iridge]
if -1 in ridge_vertices_indices:
raise RuntimeError("This structure is pathological,"
" infinite vertex in the voronoi "
"construction")
ridge_point2 = max(ridge_points)
facets = [all_vertices[i] for i in ridge_vertices_indices]
sa = my_solid_angle(site.coords, facets)
maxangle = max([sa, maxangle])
mindist = min([mindist, distances[ridge_point2]])
for iii, sss in enumerate(self.structure):
if neighbors[ridge_point2].is_periodic_image(sss):
myindex = iii
break
results2.append({'site': neighbors[ridge_point2],
'angle': sa,
'distance': distances[ridge_point2],
'index': myindex})
for dd in results2:
dd['normalized_angle'] = dd['angle'] / maxangle
dd['normalized_distance'] = dd['distance'] / mindist
self.voronoi_list2[isite] = results2
t2 = time.clock()
logging.info('Voronoi list set up in {:.2f} seconds'.format(t2-t1))
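    # Worked normalization example: within one site's results, distances are
    # divided by the shortest neighbor distance and solid angles by the
    # largest solid angle, so distances [2.0, 2.4] with angles [1.2, 0.6]
    # give normalized_distance [1.0, 1.2] and normalized_angle [1.0, 0.5].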
def setup_neighbors_distances_and_angles(self, indices):
"""
Initializes the angle and distance separations
:param indices: indices of the sites for which the Voronoi is needed
"""
self.neighbors_distances = [None] * len(self.structure)
self.neighbors_normalized_distances = [None] * len(self.structure)
self.neighbors_angles = [None] * len(self.structure)
self.neighbors_normalized_angles = [None] * len(self.structure)
for isite in indices:
results = self.voronoi_list2[isite]
if results is None:
continue
#Initializes neighbors distances and normalized distances groups
self.neighbors_distances[isite] = []
self.neighbors_normalized_distances[isite] = []
normalized_distances = [nb_dict['normalized_distance'] for nb_dict in results]
isorted_distances = np.argsort(normalized_distances)
self.neighbors_normalized_distances[isite].append({'min': normalized_distances[isorted_distances[0]],
'max': normalized_distances[isorted_distances[0]]})
self.neighbors_distances[isite].append({'min': results[isorted_distances[0]]['distance'],
'max': results[isorted_distances[0]]['distance']})
icurrent = 0
nb_indices = {int(isorted_distances[0])}
dnb_indices = {int(isorted_distances[0])}
for idist in iter(isorted_distances):
wd = normalized_distances[idist]
if self.maximum_distance_factor is not None:
if wd > self.maximum_distance_factor:
self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
break
if np.isclose(wd, self.neighbors_normalized_distances[isite][icurrent]['max'],
rtol=0.0, atol=self.normalized_distance_tolerance):
self.neighbors_normalized_distances[isite][icurrent]['max'] = wd
self.neighbors_distances[isite][icurrent]['max'] = results[idist]['distance']
dnb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
dnb_indices = {int(idist)}
self.neighbors_normalized_distances[isite].append({'min': wd,
'max': wd})
self.neighbors_distances[isite].append({'min': results[idist]['distance'],
'max': results[idist]['distance']})
icurrent += 1
nb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
for idist in range(len(self.neighbors_distances[isite]) - 1):
dist_dict = self.neighbors_distances[isite][idist]
dist_dict_next = self.neighbors_distances[isite][idist+1]
dist_dict['next'] = dist_dict_next['min']
ndist_dict = self.neighbors_normalized_distances[isite][idist]
ndist_dict_next = self.neighbors_normalized_distances[isite][idist + 1]
ndist_dict['next'] = ndist_dict_next['min']
if self.maximum_distance_factor is not None:
dfact = self.maximum_distance_factor
else:
dfact = self.default_voronoi_cutoff / self.neighbors_distances[isite][0]['min']
self.neighbors_normalized_distances[isite][-1]['next'] = dfact
self.neighbors_distances[isite][-1]['next'] = dfact * self.neighbors_distances[isite][0]['min']
#Initializes neighbors angles and normalized angles groups
self.neighbors_angles[isite] = []
self.neighbors_normalized_angles[isite] = []
normalized_angles = [nb_dict['normalized_angle'] for nb_dict in results]
isorted_angles = np.argsort(normalized_angles)[::-1]
self.neighbors_normalized_angles[isite].append({'max': normalized_angles[isorted_angles[0]],
'min': normalized_angles[isorted_angles[0]]})
self.neighbors_angles[isite].append({'max': results[isorted_angles[0]]['angle'],
'min': results[isorted_angles[0]]['angle']})
icurrent = 0
nb_indices = {int(isorted_angles[0])}
dnb_indices = {int(isorted_angles[0])}
for iang in iter(isorted_angles):
wa = normalized_angles[iang]
if self.minimum_angle_factor is not None:
if wa < self.minimum_angle_factor:
self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
break
if np.isclose(wa, self.neighbors_normalized_angles[isite][icurrent]['min'],
rtol=0.0, atol=self.normalized_angle_tolerance):
self.neighbors_normalized_angles[isite][icurrent]['min'] = wa
self.neighbors_angles[isite][icurrent]['min'] = results[iang]['angle']
dnb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
dnb_indices = {int(iang)}
self.neighbors_normalized_angles[isite].append({'max': wa,
'min': wa})
self.neighbors_angles[isite].append({'max': results[iang]['angle'],
'min': results[iang]['angle']})
icurrent += 1
nb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
for iang in range(len(self.neighbors_angles[isite]) - 1):
ang_dict = self.neighbors_angles[isite][iang]
ang_dict_next = self.neighbors_angles[isite][iang + 1]
ang_dict['next'] = ang_dict_next['max']
nang_dict = self.neighbors_normalized_angles[isite][iang]
nang_dict_next = self.neighbors_normalized_angles[isite][iang + 1]
nang_dict['next'] = nang_dict_next['max']
if self.minimum_angle_factor is not None:
afact = self.minimum_angle_factor
else:
afact = 0.0
self.neighbors_normalized_angles[isite][-1]['next'] = afact
self.neighbors_angles[isite][-1]['next'] = afact * self.neighbors_angles[isite][0]['max']
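    # Worked sketch of the plateau grouping above (made-up numbers, not from any
    # real structure): with a normalized-distance tolerance of 0.05, the sorted
    # distances [1.00, 1.01, 1.40] collapse into two plateaus,
    # {'min': 1.00, 'max': 1.01} and {'min': 1.40, 'max': 1.40}; each plateau's
    # 'next' points at the following plateau's 'min', and the last one points at
    # the maximum distance factor (or the default-Voronoi-cutoff fallback). The
    # angle plateaus are built the same way but in decreasing order, linked
    # through 'max' and terminated by the minimum angle factor (or 0.0).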
def _precompute_additional_conditions(self, ivoronoi, voronoi, valences):
additional_conditions = {ac: [] for ac in self.additional_conditions}
for ips, (ps, vals) in enumerate(voronoi):
for ac in self.additional_conditions:
additional_conditions[ac].append(self.AC.check_condition(condition=ac, structure=self.structure,
parameters={'valences': valences,
'neighbor_index': vals['index'],
'site_index': ivoronoi}))
return additional_conditions
def _precompute_distance_conditions(self, ivoronoi, voronoi):
distance_conditions = []
for idp, dp_dict in enumerate(self.neighbors_normalized_distances[ivoronoi]):
distance_conditions.append([])
dp = dp_dict['max']
for ips, (ps, vals) in enumerate(voronoi):
distance_conditions[idp].append(vals['normalized_distance'] <= dp or
np.isclose(vals['normalized_distance'], dp,
rtol=0.0, atol=self.normalized_distance_tolerance/2.0))
return distance_conditions
def _precompute_angle_conditions(self, ivoronoi, voronoi):
angle_conditions = []
for iap, ap_dict in enumerate(self.neighbors_normalized_angles[ivoronoi]):
angle_conditions.append([])
ap = ap_dict['max']
for ips, (ps, vals) in enumerate(voronoi):
angle_conditions[iap].append(vals['normalized_angle'] >= ap or
np.isclose(vals['normalized_angle'], ap,
rtol=0.0, atol=self.normalized_angle_tolerance/2.0))
return angle_conditions
def neighbors_map(self, isite, distfactor, angfactor, additional_condition):
if self.neighbors_normalized_distances[isite] is None:
return None
dist_where = np.argwhere(np.array([wd['min'] for wd in self.neighbors_normalized_distances[isite]]) <= distfactor)
if len(dist_where) == 0:
return None
idist = dist_where[-1][0]
ang_where = np.argwhere(np.array([wa['max'] for wa in self.neighbors_normalized_angles[isite]]) >= angfactor)
if len(ang_where) == 0:
return None
iang = ang_where[0][0]
if self.additional_conditions.count(additional_condition) != 1:
return None
i_additional_condition = self.additional_conditions.index(additional_condition)
return {'i_distfactor': idist, 'i_angfactor': iang, 'i_additional_condition': i_additional_condition}
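    # Hypothetical usage of neighbors_map (container name ``dvc`` assumed): it
    # returns the plateau indices enclosing a given (distfactor, angfactor)
    # point, or None when the point falls outside the sampled plateaus, e.g.:
    #     mapping = dvc.neighbors_map(isite=0, distfactor=1.4, angfactor=0.3,
    #                                 additional_condition=dvc.AC.ONLY_ACB)
    #     if mapping is not None:
    #         idist, iang = mapping['i_distfactor'], mapping['i_angfactor']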
def neighbors_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0):
if self.voronoi_list2[isite] is None:
return None
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite, surface_calculation_type, max_dist)
distance_bounds = bounds_and_limits['distance_bounds']
angle_bounds = bounds_and_limits['angle_bounds']
        surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), float)
for idp in range(len(distance_bounds) - 1):
this_dist_plateau = distance_bounds[idp + 1] - distance_bounds[idp]
for iap in range(len(angle_bounds) - 1):
this_ang_plateau = angle_bounds[iap + 1] - angle_bounds[iap]
surfaces[idp][iap] = np.absolute(this_dist_plateau*this_ang_plateau)
return surfaces
def neighbors_surfaces_bounded(self, isite, surface_calculation_options=None):
if self.voronoi_list2[isite] is None:
return None
if surface_calculation_options is None:
surface_calculation_options = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.2, 'upper': 1.8},
'angle_bounds': {'lower': 0.1, 'upper': 0.8}}
if surface_calculation_options['type'] in ['standard_elliptic', 'standard_diamond', 'standard_spline']:
plot_type = {'distance_parameter': ('initial_normalized', None),
'angle_parameter': ('initial_normalized', None)}
else:
raise ValueError('Type "{}" for the surface calculation in DetailedVoronoiContainer '
'is invalid'.format(surface_calculation_options['type']))
max_dist = surface_calculation_options['distance_bounds']['upper'] + 0.1
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite=isite,
plot_type=plot_type,
max_dist=max_dist)
distance_bounds = bounds_and_limits['distance_bounds']
angle_bounds = bounds_and_limits['angle_bounds']
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_calculation_options)
mindist = surface_calculation_options['distance_bounds']['lower']
maxdist = surface_calculation_options['distance_bounds']['upper']
minang = surface_calculation_options['angle_bounds']['lower']
maxang = surface_calculation_options['angle_bounds']['upper']
f_lower = lower_and_upper_functions['lower']
f_upper = lower_and_upper_functions['upper']
        surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), float)
for idp in range(len(distance_bounds) - 1):
dp1 = distance_bounds[idp]
dp2 = distance_bounds[idp+1]
if dp2 < mindist or dp1 > maxdist:
continue
if dp1 < mindist:
d1 = mindist
else:
d1 = dp1
if dp2 > maxdist:
d2 = maxdist
else:
d2 = dp2
for iap in range(len(angle_bounds) - 1):
ap1 = angle_bounds[iap]
ap2 = angle_bounds[iap+1]
if ap1 > ap2:
ap1 = angle_bounds[iap + 1]
ap2 = angle_bounds[iap]
if ap2 < minang or ap1 > maxang:
continue
intersection, interror = rectangle_surface_intersection(rectangle=((d1, d2),
(ap1, ap2)),
f_lower=f_lower,
f_upper=f_upper,
bounds_lower=[mindist, maxdist],
bounds_upper=[mindist, maxdist],
check=False)
surfaces[idp][iap] = intersection
return surfaces
@staticmethod
def _get_vertices_dist_ang_indices(parameter_indices_list):
pp0 = [pp[0] for pp in parameter_indices_list]
pp1 = [pp[1] for pp in parameter_indices_list]
min_idist = min(pp0)
min_iang = min(pp1)
max_idist = max(pp0)
max_iang = max(pp1)
i_min_angs = np.argwhere(np.array(pp1) == min_iang)
i_max_dists = np.argwhere(np.array(pp0) == max_idist)
pp0_at_min_iang = [pp0[ii[0]] for ii in i_min_angs]
pp1_at_max_idist = [pp1[ii[0]] for ii in i_max_dists]
max_idist_at_min_iang = max(pp0_at_min_iang)
min_iang_at_max_idist = min(pp1_at_max_idist)
p1 = (min_idist, min_iang)
p2 = (max_idist_at_min_iang, min_iang)
p3 = (max_idist_at_min_iang, min_iang_at_max_idist)
p4 = (max_idist, min_iang_at_max_idist)
p5 = (max_idist, max_iang)
p6 = (min_idist, max_iang)
return [p1, p2, p3, p4, p5, p6]
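    # Illustration of the vertex construction above with made-up plateau
    # indices: for parameter_indices_list = [(0, 0), (1, 0), (1, 1), (2, 1),
    # (2, 2)], the staircase-shaped region is summarized by the six vertices
    # (0, 0), (1, 0), (1, 1), (2, 1), (2, 2) and (0, 2).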
def maps_and_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0, additional_conditions=None):
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces(isite=isite, surface_calculation_type=surface_calculation_type,
max_dist=max_dist)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items():
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append({'map': (cn, imap), 'surface': thissurf,
'parameters_indices': list_parameters_indices})
return maps_and_surfaces
def maps_and_surfaces_bounded(self, isite, surface_calculation_options=None, additional_conditions=None):
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces_bounded(isite=isite, surface_calculation_options=surface_calculation_options)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items():
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append({'map': (cn, imap), 'surface': thissurf,
'parameters_indices': list_parameters_indices})
return maps_and_surfaces
def neighbors(self, isite, distfactor, angfactor, additional_condition=None):
idist = None
dfact = None
for iwd, wd in enumerate(self.neighbors_normalized_distances[isite]):
if distfactor >= wd['min']:
idist = iwd
dfact = wd['max']
else:
break
iang = None
afact = None
for iwa, wa in enumerate(self.neighbors_normalized_angles[isite]):
if angfactor <= wa['max']:
iang = iwa
afact = wa['min']
else:
break
if idist is None or iang is None:
raise ValueError('Distance or angle parameter not found ...')
return [nb for nb in self.voronoi_list2[isite] if
nb['normalized_distance'] <= dfact and nb['normalized_angle'] >= afact]
def voronoi_parameters_bounds_and_limits(self, isite, plot_type, max_dist):
#Initializes the distance and angle parameters
if self.voronoi_list2[isite] is None:
return None
if plot_type is None:
plot_type = {'distance_parameter': ('initial_inverse_opposite', None),
'angle_parameter': ('initial_opposite', None)}
dd = [dist['min'] for dist in self.neighbors_normalized_distances[isite]]
dd[0] = 1.0
if plot_type['distance_parameter'][0] == 'initial_normalized':
dd.append(max_dist)
distance_bounds = np.array(dd)
dist_limits = [1.0, max_dist]
elif plot_type['distance_parameter'][0] == 'initial_inverse_opposite':
ddinv = [1.0 / dist for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
elif plot_type['distance_parameter'][0] == 'initial_inverse3_opposite':
ddinv = [1.0 / dist**3.0 for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
else:
raise NotImplementedError('Plotting type "{}" '
'for the distance is not implemented'.format(plot_type['distance_parameter']))
if plot_type['angle_parameter'][0] == 'initial_normalized':
aa = [0.0]
aa.extend([ang['max'] for ang in self.neighbors_normalized_angles[isite]])
angle_bounds = np.array(aa)
elif plot_type['angle_parameter'][0] == 'initial_opposite':
aa = [0.0]
aa.extend([ang['max'] for ang in self.neighbors_normalized_angles[isite]])
aa = [1.0 - ang for ang in aa]
angle_bounds = np.array(aa)
else:
raise NotImplementedError('Plotting type "{}" '
'for the angle is not implemented'.format(plot_type['angle_parameter']))
ang_limits = [0.0, 1.0]
return {'distance_bounds': distance_bounds, 'distance_limits': dist_limits,
'angle_bounds': angle_bounds, 'angle_limits': ang_limits}
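    # Note on the 'initial_inverse_opposite' mapping above: a normalized
    # distance d >= 1 is plotted as 1 - 1/d, so d = 1 maps to 0.0, d = 2 to 0.5
    # and d -> infinity to 1.0, compressing all plateaus into [0, 1]; the
    # 'initial_inverse3_opposite' variant uses 1 - 1/d**3 instead.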
def is_close_to(self, other, rtol=0.0, atol=1e-8):
isclose = (np.isclose(self.normalized_angle_tolerance, other.normalized_angle_tolerance,
rtol=rtol, atol=atol) and
np.isclose(self.normalized_distance_tolerance, other.normalized_distance_tolerance,
rtol=rtol, atol=atol) and
self.additional_conditions == other.additional_conditions and
self.valences == other.valences)
if not isclose:
return isclose
        for isite, site_voronoi in enumerate(self.voronoi_list2):
            self_to_other_nbs = {}
            if site_voronoi is None:
                if other.voronoi_list2[isite] is None:
                    continue
                return False
            if other.voronoi_list2[isite] is None:
                return False
            for inb, nb in enumerate(site_voronoi):
                nb_other = None
for inb2, nb2 in enumerate(other.voronoi_list2[isite]):
if nb['site'] == nb2['site']:
self_to_other_nbs[inb] = inb2
nb_other = nb2
break
if nb_other is None:
return False
if not np.isclose(nb['distance'], nb_other['distance'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['angle'], nb_other['angle'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['normalized_distance'], nb_other['normalized_distance'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['normalized_angle'], nb_other['normalized_angle'],
rtol=rtol, atol=atol):
return False
if nb['index'] != nb_other['index']:
return False
if nb['site'] != nb_other['site']:
return False
return True
def get_rdf_figure(self, isite, normalized=True, figsize=None,
step_function=None):
def dp_func(dp):
return 1.0 - 1.0 / np.power(dp, 3.0)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {'type': 'normal_cdf', 'scale': 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
dists = self.neighbors_normalized_distances[isite]
else:
dists = self.neighbors_distances[isite]
if step_function['type'] == 'step_function':
isorted = np.argsort([dd['min'] for dd in dists])
sorted_dists = [dists[ii]['min'] for ii in isorted]
dnb_dists = [len(dists[ii]['dnb_indices']) for ii in isorted]
xx = [0.0]
yy = [0.0]
for idist, dist in enumerate(sorted_dists):
xx.append(dist)
xx.append(dist)
yy.append(yy[-1])
yy.append(yy[-1]+dnb_dists[idist])
xx.append(1.1*xx[-1])
yy.append(yy[-1])
elif step_function['type'] == 'normal_cdf':
scale = step_function['scale']
mydists = [dp_func(dd['min']) for dd in dists]
mydcns = [len(dd['dnb_indices']) for dd in dists]
xx = np.linspace(0.0, 1.1*max(mydists), num=500)
yy = np.zeros_like(xx)
for idist, dist in enumerate(mydists):
yy += mydcns[idist] * normal_cdf_step(xx, mean=dist, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function['type']))
subplot.plot(xx, yy)
return fig
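    # In the 'normal_cdf' branch above, each distance plateau contributes its
    # neighbour count times a normal CDF centred on dp_func(min) = 1 - 1/min**3,
    # so the plotted curve is a smoothed version of the 'step_function'
    # staircase; normal_cdf_step is assumed to be a module-level helper
    # implementing that cumulative distribution.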
def get_sadf_figure(self, isite, normalized=True, figsize=None,
step_function=None):
def ap_func(ap):
return np.power(ap, -0.1)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {'type': 'step_function', 'scale': 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
angs = self.neighbors_normalized_angles[isite]
else:
angs = self.neighbors_angles[isite]
if step_function['type'] == 'step_function':
isorted = np.argsort([ap_func(aa['min']) for aa in angs])
sorted_angs = [ap_func(angs[ii]['min']) for ii in isorted]
dnb_angs = [len(angs[ii]['dnb_indices']) for ii in isorted]
xx = [0.0]
yy = [0.0]
for iang, ang in enumerate(sorted_angs):
xx.append(ang)
xx.append(ang)
yy.append(yy[-1])
yy.append(yy[-1]+dnb_angs[iang])
xx.append(1.1*xx[-1])
yy.append(yy[-1])
elif step_function['type'] == 'normal_cdf':
scale = step_function['scale']
myangs = [ap_func(aa['min']) for aa in angs]
mydcns = [len(dd['dnb_indices']) for dd in angs]
xx = np.linspace(0.0, 1.1*max(myangs), num=500)
yy = np.zeros_like(xx)
for iang, ang in enumerate(myangs):
yy += mydcns[iang] * normal_cdf_step(xx, mean=ang, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function['type']))
subplot.plot(xx, yy)
return fig
def __eq__(self, other):
return (self.normalized_angle_tolerance == other.normalized_angle_tolerance and
self.normalized_distance_tolerance == other.normalized_distance_tolerance and
self.additional_conditions == other.additional_conditions and
self.valences == other.valences and
self.voronoi_list2 == other.voronoi_list2 and
self.structure == other.structure)
def __ne__(self, other):
return not self == other
def to_bson_voronoi_list2(self):
"""
Transforms the voronoi_list into a vlist + bson_nb_voro_list, that are BSON-encodable.
:return: [vlist, bson_nb_voro_list], to be used in the as_dict method
"""
bson_nb_voro_list2 = [None] * len(self.voronoi_list2)
for ivoro, voro in enumerate(self.voronoi_list2):
if voro is None or voro == 'None':
continue
site_voro = []
# {'site': neighbors[nn[1]],
# 'angle': sa,
# 'distance': distances[nn[1]],
# 'index': myindex}
for nb_dict in voro:
site = nb_dict['site']
site_dict = {key: val for key, val in nb_dict.items() if key not in ['site']}
diff = site._fcoords - self.structure[nb_dict['index']]._fcoords
site_voro.append([[nb_dict['index'], [float(c) for c in diff]],
site_dict])
bson_nb_voro_list2[ivoro] = site_voro
return bson_nb_voro_list2
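    # Shape sketch of one encoded neighbour entry (illustrative values only):
    #     [[index_in_structure, [dx, dy, dz]],   # fractional-coordinate offset
    #      {'distance': ..., 'angle': ..., 'normalized_distance': ..., ...}]
    # from_bson_voronoi_list2 (defined elsewhere in this module) is assumed to
    # rebuild each periodic site from the stored index and offset.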
def as_dict(self):
"""
Bson-serializable dict representation of the VoronoiContainer.
:return: dictionary that is BSON-encodable
"""
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
# "neighbors_lists": self.neighbors_lists,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object
"""
structure = Structure.from_dict(d['structure'])
voronoi_list2 = from_bson_voronoi_list2(d['bson_nb_voro_list2'], structure)
maximum_distance_factor = d['maximum_distance_factor'] if 'maximum_distance_factor' in d else None
minimum_angle_factor = d['minimum_angle_factor'] if 'minimum_angle_factor' in d else None
return cls(structure=structure, voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d['normalized_angle_tolerance'],
normalized_distance_tolerance=d['normalized_distance_tolerance'],
additional_conditions=d['additional_conditions'],
valences=d['valences'],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import inspect
import itertools
import logging
import time
import types
from types import NoneType
from google.appengine.ext import db, ndb
from mcfw.cache import set_cache_key
from mcfw.consts import MISSING
from mcfw.properties import get_members, simple_types, object_factory, long_property, unicode_property, typed_property
class ErrorResponse(object):
status_code = long_property('1')
error = unicode_property('2')
data = typed_property('3', dict)
def __init__(self, rest_exception):
"""
Args:
rest_exception (mcfw.exceptions.HttpException):
"""
self.status_code = rest_exception.http_code
self.error = u'%s' % rest_exception.error
self.data = rest_exception.data
class MissingArgumentException(Exception):
def __init__(self, name, func=None):
Exception.__init__(self, "%s is a required argument%s!" % (
name, (' in function %s' % func.func_name) if func else ''))
self.name = name
def log_access(call=True, response=True):
def wrap(f):
def logged(*args, **kwargs):
if call:
arg_str = ""
for i, arg in enumerate(args):
arg_str += " %s: %s\n" % (i, arg)
kwarg_str = ""
for kw, arg in kwargs.iteritems():
kwarg_str += " %s: %s\n" % (kw, arg)
logging.debug(u"%s.%s\nargs:\n%skwargs:\n%s" % (f.__module__, f.__name__, arg_str, kwarg_str))
start = time.time()
try:
result = f(*args, **kwargs)
if response:
end = time.time()
logging.debug(
u"%s.%s finished in %s seconds returning %s" % (f.__module__, f.__name__, end - start, result))
return result
except:
if response:
end = time.time()
logging.exception(u"%s.%s failed in %s seconds" % (f.__module__, f.__name__, end - start))
raise
set_cache_key(logged, f)
logged.__name__ = f.__name__
logged.__module__ = f.__module__
if hasattr(f, u"meta"):
logged.meta.update(f.meta)
return logged
return wrap
def arguments(**kwarg_types):
""" The arguments decorator function describes & validates the parameters of the function."""
for value in kwarg_types.itervalues():
_validate_type_spec(value)
def wrap(f):
# validate argspec
f_args = inspect.getargspec(f)
f_args = inspect.ArgSpec([a for a in f_args[0] if a not in ('self', 'cls')], f_args[1], f_args[2], f_args[3])
f_arg_count = len(f_args[0])
f_defaults = f_args[3]
if not f_defaults:
f_defaults = []
f_arg_defaults_count = len(f_defaults)
f_arg_no_defaults_count = f_arg_count - f_arg_defaults_count
f_arg_defaults = {
f_args[0][i]: f_defaults[i - f_arg_no_defaults_count] if i >= f_arg_no_defaults_count else MISSING
for i in xrange(f_arg_count)}
f_pure_default_args_dict = {f_args[0][i]: f_defaults[i - f_arg_no_defaults_count]
for i in xrange(f_arg_no_defaults_count, f_arg_count)}
if f_arg_count != len(kwarg_types):
            raise ValueError('%s: function signature contains a different number of arguments than the type annotations.'
                             '\nExpected: %s\nActual: %s' % (f.func_name, kwarg_types.keys(), f_args.args))
unknown_args = [arg for arg in f_args[0] if arg not in kwarg_types]
if unknown_args:
raise ValueError("No type information is supplied for %s!" % ", ".join(unknown_args))
def typechecked_f(*args, **kwargs):
arg_length = len(args)
if arg_length > f_arg_count:
raise ValueError("%s() takes %s arguments (%s given)" % (f.__name__, f_arg_count, arg_length))
for i in xrange(arg_length):
kwargs[f_args[0][i]] = args[i]
# accept MISSING as magical value or not
accept_missing = u'accept_missing' in kwargs
if accept_missing:
kwargs.pop(u'accept_missing')
# apply default value if available
for arg in kwarg_types:
value = kwargs.get(arg, f_arg_defaults[arg])
if value is MISSING:
value = f_arg_defaults.get(arg, MISSING)
kwargs[arg] = value
# validate number of arguments
if not len(kwargs) == len(kwarg_types):
raise ValueError("kwarg mismatch\nExpected:%s\nGot:%s" % (kwarg_types, kwargs))
# validate supplied arguments
unknown_args = [arg for arg in kwargs if arg not in kwarg_types]
if unknown_args:
raise ValueError("Unknown argument(s) %s supplied!" % ", ".join(unknown_args))
# validate argument values
for arg in kwargs:
_check_type(arg, kwarg_types[arg], kwargs[arg], accept_missing=accept_missing, func=f)
return f(**kwargs)
set_cache_key(typechecked_f, f)
typechecked_f.__name__ = f.__name__
typechecked_f.__module__ = f.__module__
typechecked_f.meta[u"fargs"] = f_args
typechecked_f.meta[u"kwarg_types"] = kwarg_types
typechecked_f.meta[u"pure_default_args_dict"] = f_pure_default_args_dict
if hasattr(f, u"meta"):
typechecked_f.meta.update(f.meta)
return typechecked_f
return wrap
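# Hedged usage sketch of the decorators above (hypothetical function; Python 2
# types as used in this module):
#     @returns(unicode)
#     @arguments(name=unicode, count=(int, long))
#     def repeat_name(name, count=1):
#         return u', '.join([name] * count)
#     repeat_name(u'bob', count=2)   # -> u'bob, bob'
#     repeat_name(123)               # raises ValueError (wrong type for 'name')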
def returns(type_=NoneType):
""" The retunrs decorator function describes & validates the result of the function."""
_validate_type_spec(type_)
def wrap(f):
def typechecked_return(*args, **kwargs):
result = f(*args, **kwargs)
return _check_type(u"Result", type_, result, func=f)
set_cache_key(typechecked_return, f)
typechecked_return.__name__ = f.__name__
typechecked_return.__module__ = f.__module__
typechecked_return.meta[u"return_type"] = type_
if hasattr(f, u"meta"):
typechecked_return.meta.update(f.meta)
return typechecked_return
return wrap
def run(function, args, kwargs):
kwargs['accept_missing'] = None
result = function(*args, **kwargs)
type_, islist = _get_return_type_details(function)
return serialize_value(result, type_, islist, skip_missing=True)
def parse_parameters(function, parameters):
kwarg_types = get_parameter_types(function)
return get_parameters(parameters, kwarg_types)
def parse_complex_value(type_, value, islist):
if value is None:
return None
parser = _get_complex_parser(type_)
if islist:
return map(parser, value)
else:
return parser(value)
def check_function_metadata(function):
if "kwarg_types" not in function.meta or "return_type" not in function.meta:
raise ValueError("Can not execute function. Too little meta information is available!")
def get_parameter_types(function):
return function.meta["kwarg_types"]
def get_parameters(parameters, kwarg_types):
return {name: parse_parameter(name, type_, parameters[name]) if name in parameters else MISSING
for name, type_ in kwarg_types.iteritems()}
def get_type_details(type_, value=MISSING):
if isinstance(type_, tuple):
# The value can have multiple types.
if value is not MISSING:
# We must find the type by comparing the possible types with the real type of <value>
value_is_list = isinstance(value, list)
if value_is_list:
if not value:
return unicode, True # The type doesn't matter, the list is empty
value = value[0]
for t in type_:
is_list = isinstance(t, list)
if is_list != value_is_list:
continue
if is_list:
t = t[0]
if t in (str, unicode):
type_to_check = (str, unicode)
elif t in (int, long):
type_to_check = (int, long)
else:
type_to_check = t
if isinstance(value, type_to_check):
return type(value), is_list
# Weird... type not found and @arguments didn't raise... The serialization will probably fail.
is_list = isinstance(type_, list)
if is_list:
type_ = type_[0]
return type_, is_list
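# Illustrative calls (a sketch; Python 2 types as used in this module):
#     get_type_details(unicode)                        # -> (unicode, False)
#     get_type_details([unicode])                      # -> (unicode, True)
#     get_type_details((unicode, [long]), [1L, 2L])    # -> (long, True)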
def serialize_complex_value(value, type_, islist, skip_missing=False):
if type_ == dict:
return value
def optimal_serializer(val):
if not isinstance(type_, object_factory) and isinstance(val, type_):
serializer = _get_complex_serializer(val.__class__)
else:
serializer = _get_complex_serializer(type_)
return serializer(val, skip_missing)
if value is None:
return None
if islist:
try:
return map(optimal_serializer, value)
except:
logging.warn("value for type %s was %s", type_, value)
raise
else:
return optimal_serializer(value)
def serialize_value(value, type_, islist, skip_missing=False):
if value is None \
or type_ in simple_types \
or (isinstance(type_, tuple) and all(t in simple_types for t in type_)):
return value
else:
return serialize_complex_value(value, type_, islist, skip_missing)
def parse_parameter(name, type_, value):
raw_type, is_list = get_type_details(type_, value)
if isinstance(value, list) != is_list:
raise ValueError("list expected for parameter %s and got %s or vice versa!" % (name, value))
if isinstance(value, list):
return map(lambda x: _parse_value(name, raw_type, x), value)
else:
return _parse_value(name, raw_type, value)
def _validate_type_spec(type_):
if isinstance(type_, list) and len(type_) != 1:
raise ValueError("Illegal type specification!")
DICT_KEY_ITERATOR_TYPE = type(dict().iterkeys())
def _check_type(name, type_, value, accept_missing=False, func=None):
if value == MISSING:
if accept_missing:
return value
else:
raise MissingArgumentException(name, func)
checktype = (str, unicode) if type_ in (str, unicode) else type_
checktype = (int, long) if checktype in (int, long) else checktype
if value is None and (isinstance(checktype, list) or type_ not in (int, long, float, bool)):
return value
if isinstance(type_, tuple):
# multiple types are allowed. checking if value is one of the them.
errors = []
for t in type_:
try:
return _check_type(name, t, value, accept_missing, func)
except (ValueError, TypeError) as e:
errors.append(e)
continue
logging.debug('\n\n'.join(map(str, errors)))
raise ValueError("%s is not of expected type %s! Its type is %s:\n%s" % (name, str(type_), type(value), value))
if isinstance(checktype, list) and isinstance(value, list):
checktype = (str, unicode) if checktype[0] in (str, unicode) else checktype[0]
for i, x in enumerate(value):
t = checktype.get_subtype(x) if isinstance(checktype, object_factory) else checktype
if not isinstance(x, t):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item at index %s with type %s: %s."
% (name, str(checktype), i, type(x), x))
elif isinstance(checktype, list) and isinstance(value, (
types.GeneratorType, db.Query, ndb.Query, db._QueryIterator, itertools.chain, DICT_KEY_ITERATOR_TYPE)):
checktype = (str, unicode) if checktype[0] in (str, unicode) else checktype[0]
def checkStreaming():
for o in value:
if not isinstance(o, checktype):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item with type %s: %s."
% (name, str(checktype), type(o), o))
yield o
return checkStreaming()
elif checktype == type and isinstance(value, list):
if len(value) != 1:
raise ValueError("%s: unexpected type count (%s)" % (name, len(value)))
        def check(t, i):
            if not isinstance(t, type):
                raise ValueError(
                    "%s: Not all items were of expected type %s. Encountered an item at index %s with type %s: %s."
                    % (name, str(checktype), i, type(t), t))
if isinstance(value[0], tuple):
for i, t in enumerate(value[0]):
check(t, i)
else:
check(value[0], 0)
else:
if isinstance(checktype, object_factory):
checktype = checktype.get_subtype(value)
try:
if not isinstance(value, checktype):
raise ValueError(
"%s is not of expected type %s! Its type is %s:\n%s" % (name, str(checktype), type(value), value))
except TypeError as e:
raise TypeError("%s\nvalue: %s\nchecktype: %s" % (e.message, value, checktype))
return value
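# Hedged examples of _check_type behaviour (Python 2 semantics):
#     _check_type(u'x', unicode, u'a')       # returns u'a'
#     _check_type(u'x', [long], [1L, 2L])    # returns the list unchanged
#     _check_type(u'x', long, u'a')          # raises ValueError
# Generators and queries are wrapped in checkStreaming so each yielded item is
# validated lazily rather than exhausting the iterable up front.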
_complexParserCache = {}
def _get_complex_parser(type_):
if type_ is dict:
return lambda x: x
if type_ not in _complexParserCache:
def parse(value):
t = type_.get_subtype(value) if isinstance(type_, object_factory) else type_
inst = t()
complex_members, simple_members = get_members(t)
for name, prop in simple_members:
setattr(inst, name, value[name] if name in value else getattr(t, name).default)
for name, prop in complex_members:
setattr(inst, name, parse_complex_value(
prop.get_subtype(inst) if (prop.subtype_attr_name and prop.subtype_mapping) else prop.type,
value[name], prop.list) if name in value else MISSING)
return inst
_complexParserCache[type_] = parse
return parse
else:
return _complexParserCache[type_]
_value_types = {int, long, float, bool, NoneType}
def _parse_value(name, type_, value):
def raize():
raise ValueError("Incorrect type received for parameter '%s'. Expected %s and got %s (%s)."
% (name, type_, type(value), value))
istuple = isinstance(type_, tuple)
if (istuple and set(type_).issubset(_value_types)) or type_ in _value_types:
if not isinstance(value, type_):
raize()
return value
elif istuple:
for tt in type_:
try:
return _parse_value(name, tt, value)
except ValueError:
pass
raize()
elif value is None:
return None
elif type_ == unicode:
if not isinstance(value, (str, unicode)):
raize()
return value if isinstance(value, unicode) else unicode(value)
elif type_ == str:
if not isinstance(value, (str, unicode)):
raize()
return value
elif not isinstance(value, dict):
raize()
return parse_complex_value(type_, value, False)
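# e.g. (illustrative only):
#     _parse_value(u'age', long, 5L)           # -> 5L
#     _parse_value(u'name', unicode, 'bob')    # -> u'bob' (str is coerced)
#     _parse_value(u'age', long, u'5')         # raises ValueError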
_complex_serializer_cache = {}
def _get_complex_serializer(type_):
if type_ not in _complex_serializer_cache:
def serializer(value, skip_missing):
t = type_.get_subtype(value) if isinstance(type_, object_factory) else type_
complex_members, simple_members = get_members(t)
result = {name: getattr(value, name) for (name, _) in simple_members
if not skip_missing or getattr(value, name) is not MISSING}
def _serialize(name, prop):
attr = getattr(value, name)
real_type = prop.get_subtype(value) if (prop.subtype_attr_name and prop.subtype_mapping) else prop.type
serialized_value = serialize_complex_value(attr, real_type, prop.list, skip_missing)
return serialized_value
for (name, prop) in complex_members:
if not skip_missing or getattr(value, name) is not MISSING:
result[name] = _serialize(name, prop)
return result
_complex_serializer_cache[type_] = serializer
return serializer
else:
return _complex_serializer_cache[type_]
def _get_return_type_details(function):
return get_type_details(function.meta["return_type"])
|
|
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
import bible # python-bible module. See http://github.com/jasford/python-bible
from fields import VerseField
class BibleBase(object):
" Base class from which Bible, Book, and Verse implement. "
def __repr__(self):
return u'<%s: %s>' % (self.__class__.__name__, self.__str__())
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __iter__(self):
for i in xrange(len(self)):
yield self._get_element(i)
def __ne__(self, other):
return not self.__eq__(other)
    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)
    def __gt__(self, other):
        return not self.__le__(other)
    def __ge__(self, other):
        return not self.__lt__(other)
class Bible(BibleBase):
" Represents a Bible (version/translation.) "
def __init__(self, name, translation, book_data=None, language='English',
chapter_text='Chapter %d', psalm_text='Psalm %d'):
self.name = name
self.translation = translation # Letter code, eg: 'KJV'
self._books = [] # Populate with self._set_books(book_data)
self._chapter_text = chapter_text
self._psalm_text = psalm_text
if book_data:
self.set_books(book_data)
self.language = language
# Other natively supported parameters: TODO.
self._introduction = None # Use Markdown
self._preface = None # Use Markdown
self._title_page = None # Use Markdown
def set_books(self, book_data):
"""
Set the books, based on the given book_data.
@args::
`book_data`: list of dictionaries eg:
[{
'testament': 'NT',
'verse_counts': [25, 25, 22, 19, 14],
'name': '1 Peter',
            'abbreviations': ['1pet', '1p', '1pe', '1 pe', '1pt', '1pe', '1 pet', '1 pt', '1 pe'],
'altname': 'The First Epistle General of Peter',
'shortname': '', # Used to shorten 'St. John' to 'John'; defaults to 'name'.
},...]
"""
book_num = 1
for data in book_data:
self._books.append(Book(self, number=book_num, **data))
book_num += 1
@property
def num_books(self):
return len(self._books)
def __unicode__(self):
return self.translation
def __repr__(self):
return u'<Bible: %s>' % self.translation
def __len__(self):
" Returns the number of books (66) in this Bible. "
return self.num_books
@property
def num_verses(self):
num = 0
for book in self:
num += book.num_verses
return num
def _get_element(self, i):
assert 0 <= i < len(self)
return self._books[i]
def __getitem__(self, key):
" Get a specific book of the bible. NB: Slices and negative indexes are supported. "
if not isinstance(key, (slice, int, long)):
raise TypeError
if isinstance(key, slice):
(start, end, step) = key.indices(len(self)+1)
if start == 0:
raise IndexError
if start in range(1, len(self._books)+1):
start -= 1
if end in range(len(self._books)+1):
end -= 1
return self._books[start:end:step]
if key in range(1, len(self._books)+1): # key is the logical book number (1-66).
return self._books[key-1]
elif key in range(-len(self._books)-1, 0): # Negative index.
return self._books[key]
else:
raise IndexError
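    # Indexing sketch (assuming a populated Bible instance named ``kjv``):
    #     kjv[1]      # first Book (Genesis)
    #     kjv[-1]     # last Book (Revelation)
    #     kjv[1:4]    # Books 1-3 as a list; slices use logical book numbers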
@models.permalink
def get_absolute_url(self):
return ('bibletext_bible_detail', (), {
'version': self.translation})
def list_old_testament_books(self):
l = []
for book in self:
if book.testament == 'OT':
l.append(book)
else:
break
return l
def list_new_testament_books(self):
l = []
for book in self:
if book.testament == 'NT':
l.append(book)
return l
def __eq__(self, other):
if type(self) == type(other):
return self.translation == other.translation
return False
    # Other rich comparators are explicitly not implemented on the Bible itself.
    def __lt__(self, other):
        raise NotImplementedError
    def __le__(self, other):
        raise NotImplementedError
    def __gt__(self, other):
        raise NotImplementedError
    def __ge__(self, other):
        raise NotImplementedError
class Book(BibleBase):
" Book object. Represents a book of the Bible. "
def __init__(self, bible=None, testament=None, number=None, name=None, abbreviations=None, verse_counts=None,
omissions=None, altname=None, shortname=None, chapter_text=None):
"""
Book __init__.
@args::
`bible`: The bible object that this book belongs to.
`testament`: NT or OT.
`number`: (int) Book number (starting from 1).
`name`: The name of the book.
`abbreviations`: List of abbreviations for the book name.
        `verse_counts`: (6, 10, ...) list of verse counts (one per chapter), all integers.
`altname`: A long form name of this book. eg: "The gospel according to St John". Defaults to None.
`shortname`: Used to shorten 'St. John' to 'John'; defaults to 'name'.
`omissions`: {12: [4,6,9, ...], ... } A mapping of chapter to lists of verse numbers that are omitted.
"""
self.bible = bible
self.testament = testament
self.number = number # int(book number)
self.name = name
self.shortname = self.name if not shortname else shortname
self.abbreviations = abbreviations
self._chapters = []
chapter_num = 1
for verse_count in verse_counts:
verse_list = range(1, verse_count+1)
verse_omissions = None
if omissions and chapter_num in omissions:
verse_omissions = omissions[chapter_num]
self._chapters.append(Chapter(self, chapter_num, verse_list, omissions=verse_omissions, chapter_text=chapter_text))
chapter_num += 1
self.num_chapters = len(self._chapters)
self.altname = altname
def __unicode__(self):
return self.name
def __len__(self):
" Return the number of chapters. "
return len(self._chapters)
@property
def has_one_chapter(self):
        return len(self) == 1
@property
def num_verses(self):
num = 0
for chapter in self:
num += len(chapter)
return num
def _get_element(self, i):
assert 0 <= i < len(self)
return self._chapters[i]
def __getitem__(self, key):
" Get a specific chapter of this book. NB: Slices and negative indexes are supported. "
if not isinstance(key, (slice, int, long)):
raise TypeError
if isinstance(key, slice):
(start, end, step) = key.indices(len(self)+1)
if start == 0:
raise IndexError
if start in range(1, len(self._chapters)+1):
start -= 1
if end in range(len(self._chapters)+1):
end -= 1
return self._chapters[start:end:step]
if key in range(1, len(self._chapters)+1): # key is the logical chapter number.
return self._chapters[key-1]
elif key in range(-len(self._chapters)-1, 0): # Negative index.
return self._chapters[key]
else:
raise IndexError
@property
def next(self):
" Next book. "
        if self.number < len(self.bible):
return self.bible[self.number+1]
return None
@property
def prev(self):
" Previous book. "
if self.number > 1:
return self.bible[self.number-1]
return None
@models.permalink
def get_absolute_url(self):
return ('bibletext_book_detail', (), {
'version': self.bible.translation,
'book_id': self.number})
def __eq__(self, other):
if type(self) == type(other):
return (self.bible, self.number) == (other.bible, other.number)
return False
    def __lt__(self, other):
        if type(self) == type(other):
            if self.bible == other.bible:
                return self.number < other.number
        return False
class Chapter(BibleBase):
"""
Chapter object. Represents a chapter in a book of the Bible.
@args::
`book`: :models:`bibletext.Book` that this chapter belongs to.
`number`: int chapter number.
`verses`: List of the verse numbers present in this Chapter.
`chapter_text`: Defaults to `_('Chapter %d')`, and `_('Psalm %d')` for Psalms.
"""
def __init__(self, book, number, verses, omissions=None, chapter_text=None):
self.bible = book.bible
self.book = book # Needs to know the book we're in for comparators to operate.
self.number = number # int(chapter number)
if chapter_text:
self.chapter_text = chapter_text
elif self.book.number == 19: # Psalms are Psalms, not Chapters.
self.chapter_text = self.bible._psalm_text
else:
self.chapter_text = self.bible._chapter_text
self.name = self.chapter_text % self.number
self._verses = []
for verse in verses:
self._verses.append(Verse(self, verse)) # NB: verse is the Verse number.
self.num_verses = len(self._verses)
def __unicode__(self):
if len(self.book) == 1: # Only one chapter to the book, omit the chapter.
return self.book.shortname
return u'%s %s' % (self.book.shortname, self.number)
def __len__(self):
" Return the number of verses. "
return self.num_verses
def _get_element(self, i):
assert 0 <= i < len(self)
return self._verses[i]
def __getitem__(self, key):
" Get a specific verse of this chapter. NB: Slices and negative indexes are supported. "
if not isinstance(key, (slice, int, long)):
raise TypeError
if isinstance(key, slice):
(start, end, step) = key.indices(len(self)+1)
if start == 0:
raise IndexError
if start in range(1, len(self._verses)+1):
start -= 1
if end in range(len(self._verses)+1):
end -= 1
return self._verses[start:end:step]
if key in range(1, len(self._verses)+1): # key is the logical verse number.
return self._verses[key-1]
elif key in range(-len(self._verses)-1, 0): # Negative index.
return self._verses[key]
else:
raise IndexError
@property
def next(self):
" Next chapter (can be from a different book). "
if self.number < len(self.book):
return self.book[self.number+1]
else: # Next book, chapter 1.
return self.book.next[1]
@property
def prev(self):
" Previous chapter (can be from a different book). "
if self.number > 1:
return self.book[self.number-1]
else: # Previous book, last chapter.
return self.book.prev[-1]
@models.permalink
def get_absolute_url(self):
return ('bibletext_chapter_detail', (), {
'version': self.book.bible.translation,
'book_id': self.book.number,
'chapter_id': self.number})
def __eq__(self, other):
if type(self) == type(other):
return (self.book, self.number) == (other.book, other.number)
return False
def __lt__(self, other):
if type(self) == type(other):
if self.bible == other.bible:
if self.book < other.book or (self.book == other.book and self.number < other.number):
return True
return False
def __le__(self, other):
if (self.__lt__(other) or self.__eq__(other)):
return True
return False
class Verse(BibleBase):
"""
Verse object - this is used for formatting purposes.
Also links in with the VerseText implementation.
@args::
`chapter`: The chapter object that this Verse belongs to.
`number`: int verse number.
"""
def __init__(self, chapter, number):
self.bible = chapter.book.bible
self.book = chapter.book
self.chapter = chapter
self.number = number
if len(self.book) > 1:
self.name = u'%s:%s' % (self.chapter.number, self.number)
else: # Books with one chapter.
self.name = unicode(self.number)
def __unicode__(self):
if self.book.has_one_chapter:
return u'%s %s' % (self.chapter, self.number)
return u'%s:%s' % (self.chapter, self.number)
@property
def next(self):
" Next verse (can be from a different chapter and book). "
if self.number < len(self.chapter):
return self.chapter[self.number+1]
else: # Next chapter, verse 1.
return self.chapter.next[1]
@property
def prev(self):
" Previous verse (can be from a different chapter and book). "
if self.number > 1:
return self.chapter[self.number-1]
else: # Previous chapter, last verse.
return self.chapter.prev[-1]
@models.permalink
def get_absolute_url(self):
return ('bibletext_verse_detail', (), {
'version':self.book.bible.translation,
'book_id': self.book.number,
'chapter_id': self.chapter.number,
'verse_id': self.number})
def __eq__(self, other):
if type(self) == type(other):
return (self.chapter, self.number) == (other.chapter, other.number)
return False
def __lt__(self, other):
if type(self) == type(other):
if self.bible == other.bible:
if self.book < other.book or self.chapter < other.chapter or \
(self.book == other.book and self.chapter == other.chapter and
self.number < other.number):
return True
return False
class BiblePassageManager(models.Manager):
" NB: verse and passage work with English at present. "
def verse(self, reference):
" Takes textual verse information and returns the Verse. "
        if self.model.translation and not reference.endswith(self.model.translation):
            reference += ' '+self.model.translation
verse = bible.Verse(reference)
return self.get_query_set().get(book_id=verse.book, chapter_id=verse.chapter, verse_id=verse.verse)
def passage(self, start_reference, end_reference=None):
"""
Takes textual passage information and returns the Verse(s).
Note: you can't just input 'Romans 1:1-2:3',
you'll need to do ('Romans 1:1', 'Romans 2:3') for the time being.
"""
if not end_reference: # Probably just a single verse, return a list anyway.
end_reference = start_reference
        if self.model.translation and not start_reference.endswith(self.model.translation):
            start_reference += ' '+self.model.translation
        if self.model.translation and not end_reference.endswith(self.model.translation):
            end_reference += ' '+self.model.translation
# NB: len(passage) gives us the number of Verses in the passage.
passage = bible.Passage(start_reference, end_reference)
# We'll get the number of verses from the start like so to save a db lookup:
in_the_beginning = 'Genesis 1:1'
if self.model.translation:
in_the_beginning += ' '+self.model.translation
start_pk = len(bible.Passage(in_the_beginning, start_reference))
        return self.get_query_set().order_by('id').filter(pk__gte=start_pk)[:len(passage)]
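# Hypothetical manager usage (assumes a concrete VerseText subclass, e.g. KJV,
# whose table was loaded in canonical order with contiguous primary keys):
#     KJV.objects.verse('John 3:16')
#     KJV.objects.passage('Romans 1:1', 'Romans 2:3')
# The starting primary key is derived by counting verses from Genesis 1:1, so
# the slice above never needs a second database lookup for the start verse.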
class VerseText(models.Model):
"""
VerseText (Bible) model - implement this abstract class for translations/versions.
Each record (object) will be a single verse.
"""
book_id = models.PositiveIntegerField(default=1)
chapter_id = models.PositiveIntegerField(default=1)
verse_id = models.PositiveIntegerField(default=1)
text = models.TextField()
translation = None # Use the translation code (KJV, NKJV etc) here according to what python-bible supports.
bible = None # Must implement Bible() to get formattable chapters, and so forth.
objects = BiblePassageManager()
def __unicode__(self):
return u'%s %s:%s' % (self.book, self.chapter.number, self.verse.number)
class Meta:
        ordering = ('id',)  # Should already be: ('book_id', 'chapter_id', 'verse_id')
unique_together = [('book_id', 'chapter_id', 'verse_id')]
app_label = 'bibletext'
abstract = True
@classmethod
def register_version(cls, *versions):
"""
Register a list of bible versions::
VerseText.register_version(
KJV,
)
You can call this function as often as you like to register more bible versions.
"""
if not hasattr(cls, 'versions'):
cls.versions = []
for version in versions:
try:
version_content_type = ContentType.objects.get_for_model(version)
if version_content_type.pk not in cls.versions:
cls.versions.append(version_content_type.pk)
except DatabaseError:
pass # We're probably on an initial syncdb, and there are no tables created.
@property
def book(self):
" Book object. "
return self.bible[self.book_id]
@property
def chapter(self):
" Chapter object. "
return self.book[self.chapter_id]
@property
def verse(self):
" Verse object. "
return self.chapter[self.verse_id]
@models.permalink
def get_absolute_url(self):
return ('bibletext_verse_detail', (), {
'version':self.translation,
'book_id': self.book.number,
'chapter_id': self.chapter.number,
'verse_id': self.verse.number})
@models.permalink
def get_chapter_url(self):
return ('bibletext_chapter_detail', (), {
'version':self.translation,
'book_id': self.book.number,
'chapter_id': self.chapter.number})
#---------------------
# Next/Previous Verses
@property
def next_verse(self):
if hasattr(self, '_next_verse'):
return self._next_verse
try:
self._next_verse = self.__class__.objects.get(pk=self.pk+1)
return self._next_verse
except self.__class__.DoesNotExist:
self._next_verse = None
return self._next_verse
@property
def prev_verse(self):
if hasattr(self, '_prev_verse'):
return self._prev_verse
if self.book_id == 1 and self.chapter_id == 1 and self.verse_id == 1:
self._prev_verse = None # Genesis 1:1 has no previous verse.
return self._prev_verse
self._prev_verse = self.__class__.objects.get(pk=self.pk-1)
return self._prev_verse
#-----------------------
# Next/Previous Chapters
@property
def next_chapter(self):
" Returns a Chapter object or None. "
return self.chapter.next
@property
def prev_chapter(self):
" Returns a Chapter object or None. "
return self.chapter.prev
#---------------------
# Next/Previous Books
@property
def next_book(self):
" Returns a Book object or None. "
return self.book.next
@property
def prev_book(self):
" Returns a Book object or None. "
return self.book.prev
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_setting
short_description: VDOM wireless controller configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller feature and setting category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
wireless_controller_setting:
description:
- VDOM wireless controller configuration.
default: null
type: dict
suboptions:
account_id:
description:
- FortiCloud customer account ID.
type: str
country:
description:
- Country or region in which the FortiGate is located. The country determines the 802.11 bands and channels that are available.
type: str
choices:
- NA
- AL
- DZ
- AO
- AR
- AM
- AU
- AT
- AZ
- BH
- BD
- BB
- BY
- BE
- BZ
- BO
- BA
- BR
- BN
- BG
- KH
- CL
- CN
- CO
- CR
- HR
- CY
- CZ
- DK
- DO
- EC
- EG
- SV
- EE
- FI
- FR
- GE
- DE
- GR
- GL
- GD
- GU
- GT
- HT
- HN
- HK
- HU
- IS
- IN
- ID
- IR
- IE
- IL
- IT
- JM
- JO
- KZ
- KE
- KP
- KR
- KW
- LV
- LB
- LI
- LT
- LU
- MO
- MK
- MY
- MT
- MX
- MC
- MA
- MZ
- MM
- NP
- NL
- AN
- AW
- NZ
- NO
- OM
- PK
- PA
- PG
- PY
- PE
- PH
- PL
- PT
- PR
- QA
- RO
- RU
- RW
- SA
- RS
- ME
- SG
- SK
- SI
- ZA
- ES
- LK
- SE
- SD
- CH
- SY
- TW
- TZ
- TH
- TT
- TN
- TR
- AE
- UA
- GB
- US
- PS
- UY
- UZ
- VE
- VN
- YE
- ZB
- ZW
- JP
- CA
duplicate_ssid:
description:
- Enable/disable allowing Virtual Access Points (VAPs) to use the same SSID name in the same VDOM.
type: str
choices:
- enable
- disable
fapc_compatibility:
description:
- Enable/disable FAP-C series compatibility.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: VDOM wireless controller configuration.
fortios_wireless_controller_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
wireless_controller_setting:
account_id: "<your_own_value>"
country: "NA"
duplicate_ssid: "enable"
fapc_compatibility: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_setting_data(json):
option_list = ['account_id', 'country', 'duplicate_ssid',
'fapc_compatibility']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
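# e.g. underscore_to_hyphen({'duplicate_ssid': 'enable', 'fapc_compatibility': 'disable'})
#      -> {'duplicate-ssid': 'enable', 'fapc-compatibility': 'disable'}
# Only keys are rewritten; string values are returned untouched.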
def wireless_controller_setting(data, fos):
vdom = data['vdom']
wireless_controller_setting_data = data['wireless_controller_setting']
filtered_data = underscore_to_hyphen(filter_wireless_controller_setting_data(wireless_controller_setting_data))
return fos.set('wireless-controller',
'setting',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller(data, fos):
    if data['wireless_controller_setting']:
        resp = wireless_controller_setting(data, fos)
    else:
        raise ValueError('wireless_controller_setting must be provided')
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"wireless_controller_setting": {
"required": False, "type": "dict", "default": None,
"options": {
"account_id": {"required": False, "type": "str"},
"country": {"required": False, "type": "str",
"choices": ["NA", "AL", "DZ",
"AO", "AR", "AM",
"AU", "AT", "AZ",
"BH", "BD", "BB",
"BY", "BE", "BZ",
"BO", "BA", "BR",
"BN", "BG", "KH",
"CL", "CN", "CO",
"CR", "HR", "CY",
"CZ", "DK", "DO",
"EC", "EG", "SV",
"EE", "FI", "FR",
"GE", "DE", "GR",
"GL", "GD", "GU",
"GT", "HT", "HN",
"HK", "HU", "IS",
"IN", "ID", "IR",
"IE", "IL", "IT",
"JM", "JO", "KZ",
"KE", "KP", "KR",
"KW", "LV", "LB",
"LI", "LT", "LU",
"MO", "MK", "MY",
"MT", "MX", "MC",
"MA", "MZ", "MM",
"NP", "NL", "AN",
"AW", "NZ", "NO",
"OM", "PK", "PA",
"PG", "PY", "PE",
"PH", "PL", "PT",
"PR", "QA", "RO",
"RU", "RW", "SA",
"RS", "ME", "SG",
"SK", "SI", "ZA",
"ES", "LK", "SE",
"SD", "CH", "SY",
"TW", "TZ", "TH",
"TT", "TN", "TR",
"AE", "UA", "GB",
"US", "PS", "UY",
"UZ", "VE", "VN",
"YE", "ZB", "ZW",
"JP", "CA"]},
"duplicate_ssid": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fapc_compatibility": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
from unittest import TestCase
from unittest.mock import patch
from email_parser.model import *
from email_parser import renderer, const, config
class TestTextRenderer(TestCase):
def setUp(self):
self.email_locale = 'locale'
self.template = Template('dummy', [], '<style>body {}</style>', '<body>{{content1}}</body>',
['content', 'content1', 'content2'], None)
self.r = renderer.TextRenderer(self.template, self.email_locale)
def test_happy_path(self):
placeholders = {'content': Placeholder('content', 'dummy content')}
actual = self.r.render(placeholders)
self.assertEqual('dummy content', actual)
def test_render_variant(self):
placeholders = {'content': Placeholder('content', 'dummy content', variants={'B': 'awesome content'})}
actual = self.r.render(placeholders, variant='B')
self.assertEqual('awesome content', actual)
def test_concat_multiple_placeholders(self):
placeholders = {
'content1': Placeholder('content', 'dummy content'),
'content2': Placeholder('content2', 'dummy content')
}
expected = const.TEXT_EMAIL_PLACEHOLDER_SEPARATOR.join(['dummy content', 'dummy content'])
actual = self.r.render(placeholders)
self.assertEqual(expected, actual)
def test_concat_multiple_placeholders_with_variants(self):
placeholders = {
'content1': Placeholder('content', 'dummy content'),
'content2': Placeholder('content2', 'dummy content', variants={'B': 'awesome content'})
}
expected = const.TEXT_EMAIL_PLACEHOLDER_SEPARATOR.join(['dummy content', 'awesome content'])
actual = self.r.render(placeholders, variant='B')
self.assertEqual(expected, actual)
def test_ignore_subject(self):
placeholders = {
'content': Placeholder('content', 'dummy content'),
'subject': Placeholder('subject', 'dummy subject')
}
actual = self.r.render(placeholders)
self.assertEqual('dummy content', actual)
def test_ignore_empty_placeholders(self):
placeholders = {'content': Placeholder('content', 'dummy content'), 'empty': Placeholder('empty', '')}
actual = self.r.render(placeholders)
self.assertEqual('dummy content', actual)
def test_ignored_placeholders(self):
placeholders = {
'content': Placeholder('content', 'dummy content'),
'ignore': Placeholder('ignore', 'test', False)
}
r = renderer.TextRenderer(self.template, self.email_locale)
actual = r.render(placeholders)
self.assertEqual('dummy content', actual)
def test_use_text_and_url_for_links(self):
placeholders = {'content': Placeholder('content', 'dummy [link_text](http://link_url) content')}
actual = self.r.render(placeholders)
self.assertEqual('dummy link_text (http://link_url) content', actual)
def test_default_link_locale_for_links(self):
placeholders = {
'content': Placeholder('content', 'dummy [link_text](http://link_url?locale={link_locale}) content')
}
actual = self.r.render(placeholders)
self.assertEqual('dummy link_text (http://link_url?locale=locale) content', actual)
def test_link_locale_for_links(self):
self.email_locale = 'pt-BR'
placeholders = {
'content': Placeholder('content', 'dummy [link_text](http://link_url?locale={link_locale}) content')
}
r = renderer.TextRenderer(self.template, self.email_locale)
actual = r.render(placeholders)
self.assertEqual('dummy link_text (http://link_url?locale=pt) content', actual)
def test_use_text_if_href_is_empty(self):
placeholders = {'content': Placeholder('content', 'dummy [http://link_url]() content')}
actual = self.r.render(placeholders)
self.assertEqual('dummy http://link_url content', actual)
def test_use_href_if_text_is_same(self):
placeholders = {'content': Placeholder('content', 'dummy [http://link_url](http://link_url) content')}
actual = self.r.render(placeholders)
self.assertEqual('dummy http://link_url content', actual)
def test_url_with_params(self):
placeholders = {
'content': Placeholder('content', 'dummy [param_link](https://something.com/thing?id=mooo) content')
}
actual = self.r.render(placeholders)
self.assertEqual('dummy param_link (https://something.com/thing?id=mooo) content', actual)
def test_unordered_list(self):
placeholders = {'content': Placeholder('content', '- one\n- two\n- three')}
actual = self.r.render(placeholders)
self.assertEqual('- one\n- two\n- three', actual.strip())
def test_ordered_list(self):
placeholders = {'content': Placeholder('content', '1. one\n2. two\n3. three')}
actual = self.r.render(placeholders)
self.assertEqual('1. one\n2. two\n3. three', actual.strip())
class TestSubjectRenderer(TestCase):
def setUp(self):
self.r = renderer.SubjectRenderer()
self.placeholders = {
'content': Placeholder('content', 'dummy content'),
'subject': Placeholder('subject', 'dummy subject', variants={'B': 'experiment subject'})
}
def test_happy_path(self):
actual = self.r.render(self.placeholders)
self.assertEqual('dummy subject', actual)
def test_variant(self):
actual = self.r.render(self.placeholders, variant='B')
self.assertEqual('experiment subject', actual)
def test_raise_error_for_missing_subject(self):
placeholders = {'content': 'dummy content'}
with self.assertRaises(MissingSubjectError):
self.r.render(placeholders)
class TestHtmlRenderer(TestCase):
def _get_renderer(self, template_html, template_placeholders, **kwargs):
template = Template(
name='template_name',
styles_names=['template_style.css', 'template_style2.css'],
styles='',
content=template_html,
placeholders=template_placeholders,
type=None)
return renderer.HtmlRenderer(template, kwargs.get('email_locale', const.DEFAULT_LOCALE))
def setUp(self):
self.email_locale = 'locale'
config.init(_base_img_path='images_base')
def tearDown(self):
config.init()
def test_happy_path(self):
placeholders = {'content1': Placeholder('content1', 'text1')}
template = Template('dummy', [], '<style>body {}</style>', '<body>{{content1}}</body>', ['content1'], None)
r = renderer.HtmlRenderer(template, self.email_locale)
actual = r.render(placeholders)
self.assertEqual('<body><p>text1</p></body>', actual)
def test_variant(self):
placeholders = {'content1': Placeholder('content1', 'text1', variants={'B': 'text2'})}
template = Template('dummy', [], '<style>body {}</style>', '<body>{{content1}}</body>', ['content1'], None)
r = renderer.HtmlRenderer(template, self.email_locale)
actual = r.render(placeholders, variant='B')
self.assertEqual('<body><p>text2</p></body>', actual)
def test_empty_style(self):
placeholders = {'content': Placeholder('content', 'dummy_content')}
template = Template('dummy', [], '', '<body>{{content}}</body>', ['content1'], None)
r = renderer.HtmlRenderer(template, self.email_locale)
actual = r.render(placeholders)
self.assertEqual('<body><p>dummy_content</p></body>', actual)
def test_include_base_url(self):
template = Template('dummy', [], '<style>body {}</style>', '<body>{{base_url}}</body>', ['base_url'], None)
placeholders = {}
r = renderer.HtmlRenderer(template, self.email_locale)
actual = r.render(placeholders)
self.assertEqual('<body>images_base</body>', actual)
def test_fail_on_missing_placeholders(self):
template = Template('dummy', [], '<style>body {}</style>', '<body>{{content}}{{missing}}</body>',
['content', 'missing'], None)
r = renderer.HtmlRenderer(template, self.email_locale)
placeholders = {'content': Placeholder('content', 'dummy_content')}
with self.assertRaises(MissingTemplatePlaceholderError):
r.render(placeholders)
def test_rtl_locale(self):
email_locale = 'he'
template = Template('dummy', [], '<style>body {}</style>', '<body>{{content}}</body>', ['content'], None)
r = renderer.HtmlRenderer(template, email_locale)
placeholders = {'content': Placeholder('content', 'dummy_content')}
actual = r.render(placeholders)
self.assertEqual('<body dir="rtl">\n <p>\n dummy_content\n </p>\n</body>', actual)
def test_rtl_two_placeholders(self):
email_locale = 'ar'
template = Template('dummy', [], '<style>body {}</style>',
'<body><div>{{content1}}</div><div>{{content2}}</div></body>', ['content1', 'content2'],
None)
r = renderer.HtmlRenderer(template, email_locale)
placeholders = {
'content1': Placeholder('content1', 'dummy_content1'),
'content2': Placeholder('content2', 'dummy_content2')
}
actual = r.render(placeholders)
expected = '<body dir="rtl">\n <div>\n <p>\n dummy_content1\n </p>\n </div>\n <div>\n <p>\n dummy_content2\n </p>\n </div>\
\n</body>'
self.assertEqual(expected, actual)
def test_inline_styles(self):
template = Template('dummy', [], '<style>p {color:red;}</style>', '<body>{{content}}</body>', ['content'], None)
r = renderer.HtmlRenderer(template, self.email_locale)
placeholders = {'content': Placeholder('content', 'dummy_content')}
actual = r.render(placeholders)
self.assertEqual('<body><p style="color: red">dummy_content</p></body>', actual)
@patch('email_parser.fs.read_file')
def test_no_tracking(self, mock_read):
html = '<body>{{content}}</body>'
html_placeholders = ['content']
placeholders = {'content': Placeholder('content', '[link_title](!http://link.com)', True, False)}
mock_read.side_effect = iter([''])
expected = """<body><p>
<a clicktracking="off" href="http://link.com">link_title</a>
</p></body>"""
r = self._get_renderer(html, html_placeholders)
actual = r.render(placeholders)
self.assertEqual(expected, actual)
def test_empty_placeholders_rendering(self):
template = Template('dummy', [], '<style>p {color:red;}</style>', '<body>{{content}}</body>', ['content'], None)
r = renderer.HtmlRenderer(template, self.email_locale)
placeholders = {'content': Placeholder('content', '')}
actual = r.render(placeholders)
self.assertEqual('<body></body>', actual)
def test_transform_extended_tags(self):
content = '<body>{{bitmap:MY_BITMAP:max-width=160;max-height=160;}}</body>'
expected = '<body>{{MY_BITMAP}}</body>'
result = renderer._transform_extended_tags(content)
self.assertEqual(result, expected)
|
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for training the prediction model."""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from prediction_input_flo_sintel_test import build_tfrecord_input, DATA_DIR
from prediction_model_flo_chair_ip import construct_model
from visualize import plot_flo_learn_symm, plot_general
from optical_flow_warp import transformer
from optical_flow_warp_fwd import transformerFwd
from flowlib import write_flow
import os
# How often to record tensorboard summaries.
SUMMARY_INTERVAL = 20
# How often to run a batch through the validation model.
VAL_INTERVAL = 200
# How often to save a model checkpoint
SAVE_INTERVAL = 500
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('output_dir', "", 'directory for model checkpoints.')
flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
'filepath of a pretrained model to initialize from.')
flags.DEFINE_float('train_val_split', 0.95,
'The percentage of files to use for the training set,'
' vs. the validation set.')
flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
'the base learning rate of the generator')
flags.DEFINE_integer('num_gpus', 1,
'the number of gpu to use')
def get_black_list(clses):
blacklist = []
for cls in clses:
fname = "/home/wangyang59/Data/ILSVRC2016/ImageSets/VID/train_%s.txt" % cls
with open(fname) as f:
content = f.readlines()
blacklist += [x.split(" ")[0].split("/")[-1] + ".tfrecord" for x in content]
return blacklist
## Helper functions
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
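# Worked example (values are illustrative): for images in [0, 1] an MSE of
# 0.01 gives PSNR = 10 * log10(1 / 0.01) = 20 dB; halving the MSE gains
# roughly 3 dB.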
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
def mean_charb_error(true, pred, beta):
return tf.reduce_sum(tf.sqrt((tf.square(beta*(true-pred)) + 0.001*0.001))) / tf.to_float(tf.size(pred))
def mean_charb_error_wmask(true, pred, mask, beta):
return tf.reduce_sum(tf.sqrt((tf.square(beta*(true-pred)) + 0.001*0.001))*mask) / tf.to_float(tf.size(pred))
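# Both Charbonnier losses above use the penalty sqrt(x^2 + 0.001^2), a smooth,
# outlier-robust approximation of |x|. Worked example (illustrative): a
# residual of 0.1 with beta = 1 scores sqrt(0.01 + 1e-6) ~= 0.100005,
# essentially |x|, while near zero the penalty behaves like a scaled x^2 and
# stays differentiable.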
def weighted_mean_squared_error(true, pred, weight):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
tmp = tf.reduce_sum(weight*tf.square(true-pred), axis=[1,2], keep_dims=True) / tf.reduce_sum(weight, axis=[1, 2], keep_dims=True)
return tf.reduce_mean(tmp)
#return tf.reduce_sum(tf.square(true - pred)*weight) / tf.to_float(tf.size(pred))
#return tf.reduce_sum(tf.square(true - pred)*weight) / tf.reduce_sum(weight)
def mean_L1_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred))
def weighted_mean_L1_error(true, pred, weight):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.abs(true - pred)*weight) / tf.to_float(tf.size(pred))
def gradient_x(img):
gx = img[:,:,:-1,:] - img[:,:,1:,:]
return gx
def gradient_y(img):
gy = img[:,:-1,:,:] - img[:,1:,:,:]
return gy
def cal_grad_error(flo, image, beta):
"""Calculate the gradient of the given image by calculate the difference between nearby pixels
"""
error = 0.0
img_grad_x = gradient_x(image)
img_grad_y = gradient_y(image)
weights_x = tf.exp(-10.0*tf.reduce_mean(tf.abs(img_grad_x), 3, keep_dims=True))
weights_y = tf.exp(-10.0*tf.reduce_mean(tf.abs(img_grad_y), 3, keep_dims=True))
error += mean_charb_error_wmask(flo[:, 1:, :, :], flo[:, :-1, :, :], weights_y, beta)
error += mean_charb_error_wmask(flo[:, :, 1:, :], flo[:, :, :-1, :], weights_x, beta)
return error / 2.0
def img_grad_error(true, pred, mask, beta):
error = 0.0
error += mean_charb_error_wmask(true[:, 1:, :, :] - true[:, :-1, :, :],
pred[:, 1:, :, :] - pred[:, :-1, :, :], mask[:, 1:, :, :], beta)
error += mean_charb_error_wmask(true[:, :, 1:, :] - true[:, :, :-1, :],
pred[:, :, 1:, :] - pred[:, :, :-1, :], mask[:, :, 1:, :], beta)
return error / 2.0
def cal_epe(flo1, flo2):
return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(flo1 - flo2), axis=3)))
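# cal_epe is the standard flow endpoint error: the mean Euclidean distance
# between predicted and ground-truth flow vectors. Worked example
# (illustrative): a predicted vector (3, 4) against ground truth (0, 0)
# contributes sqrt(3^2 + 4^2) = 5 to the mean.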
def blur(image):
batch_size, img_height, img_width, color_channels = map(int, image.get_shape()[0:4])
kernel = np.array([1., 2., 1., 2., 4., 2., 1., 2., 1.], dtype=np.float32) / 16.0
kernel = kernel.reshape((3, 3, 1, 1))
kernel = tf.constant(kernel, shape=(3, 3, 1, 1),
name='gaussian_kernel', verify_shape=True)
blur_image = tf.nn.depthwise_conv2d(tf.pad(image, [[0,0], [1,1], [1,1],[0,0]], "SYMMETRIC"), tf.tile(kernel, [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'VALID')
return blur_image
def down_sample(image, to_blur=True):
batch_size, img_height, img_width, color_channels = map(int, image.get_shape()[0:4])
if to_blur:
image = blur(image)
return tf.image.resize_bicubic(image, [img_height/2, img_width/2])
def get_pyrimad(image):
image2 = down_sample(down_sample(image))
image3 = down_sample(image2)
image4 = down_sample(image3)
image5 = down_sample(image4)
image6 = down_sample(image5)
# image2 = tf.image.resize_area(image, [img_height/4, img_width/4])
# image3 = tf.image.resize_area(image, [img_height/8, img_width/8])
# image4 = tf.image.resize_area(image, [img_height/16, img_width/16])
# image5 = tf.image.resize_area(image, [img_height/32, img_width/32])
# image6 = tf.image.resize_area(image, [img_height/64, img_width/64])
return image2, image3, image4, image5, image6
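# Worked example (shapes are illustrative): for a 448x1024 input the returned
# pyramid levels are at 1/4, 1/8, 1/16, 1/32 and 1/64 resolution, i.e.
# 112x256, 56x128, 28x64, 14x32 and 7x16; each down_sample halves both
# dimensions after a 3x3 Gaussian blur.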
def get_channel(image):
zeros = tf.zeros_like(image)
ones = tf.ones_like(image)
#gray = 0.21*image[:, :, :, 0] + 0.72*image[:, :, :, 1] + 0.07*image[:, :, :, 2]
channels = []
for i in range(10):
channels.append(tf.where(tf.logical_and(image >= i/10.0, image < (i+1)/10.0), ones, zeros))
return tf.concat([image]+channels, axis=3)
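# get_channel appends 10 binary indicator maps per input channel, one for each
# intensity bin [i/10, (i+1)/10), so a 3-channel image in [0, 1] comes back
# with 3 + 3*10 = 33 channels.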
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
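# Worked example (values are illustrative): if a variable receives gradient
# 1.0 on tower 0 and 3.0 on tower 1, the gradients are stacked along a new
# leading axis and averaged, so the returned pair is (2.0, var).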
class Model(object):
def __init__(self,
image1=None,
image2=None,
reuse_scope=False,
scope=None,
prefix="train"):
#self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
batch_size, H, W, color_channels = map(int, image1.get_shape()[0:4])
# if not reuse_scope:
# image2_recon, feature2 = autoencoder(image2, trainable=False)
# else: # If it's a validation or test model.
# with tf.variable_scope(scope, reuse=True):
# image2_recon, feature2 = autoencoder(image2, trainable=False)
#
# with tf.variable_scope(scope, reuse=True):
# image1_recon, feature1 = autoencoder(image1, trainable=False)
image1_pyrimad = get_pyrimad(get_channel(image1))
image2_pyrimad = get_pyrimad(get_channel(image2))
image1_2, image1_3, image1_4, image1_5, image1_6 = image1_pyrimad
image2_2, image2_3, image2_4, image2_5, image2_6 = image2_pyrimad
if not reuse_scope:
flow2, flow3, flow4, flow5, flow6, image1_trans = construct_model(image1, image2, image1_pyrimad, image2_pyrimad)
else: # If it's a validation or test model.
with tf.variable_scope(scope, reuse=True):
flow2, flow3, flow4, flow5, flow6, image1_trans = construct_model(image1, image2, image1_pyrimad, image2_pyrimad)
with tf.variable_scope(scope, reuse=True):
flow2r, flow3r, flow4r, flow5r, flow6r, _ = construct_model(image2, image1, image2_pyrimad, image1_pyrimad)
occu_mask_6 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/64, W/64, 1], dtype='float32'),
20*flow6r/64.0, [H/64, W/64]),
clip_value_min=0.0, clip_value_max=1.0)
occu_mask_5 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/32, W/32, 1], dtype='float32'),
20*flow5r/32.0, [H/32, W/32]),
clip_value_min=0.0, clip_value_max=1.0)
occu_mask_4 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/16, W/16, 1], dtype='float32'),
20*flow4r/16.0, [H/16, W/16]),
clip_value_min=0.0, clip_value_max=1.0)
occu_mask_3 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/8, W/8, 1], dtype='float32'),
20*flow3r/8.0, [H/8, W/8]),
clip_value_min=0.0, clip_value_max=1.0)
occu_mask_2 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/4, W/4, 1], dtype='float32'),
20*flow2r/4.0, [H/4, W/4]),
clip_value_min=0.0, clip_value_max=1.0)
image1_2p, image1_3p, image1_4p, image1_5p, image1_6p = image1_trans
loss6 = mean_charb_error_wmask(image1_6, image1_6p, occu_mask_6, 1.0)
loss5 = mean_charb_error_wmask(image1_5, image1_5p, occu_mask_5, 1.0)
loss4 = mean_charb_error_wmask(image1_4, image1_4p, occu_mask_4, 1.0)
loss3 = mean_charb_error_wmask(image1_3, image1_3p, occu_mask_3, 1.0)
loss2 = mean_charb_error_wmask(image1_2, image1_2p, occu_mask_2, 1.0)
grad_error6 = cal_grad_error(flow6, image1_6[:,:,:,0:3], 1.0/64.0)
grad_error5 = cal_grad_error(flow5, image1_5[:,:,:,0:3], 1.0/32.0)
grad_error4 = cal_grad_error(flow4, image1_4[:,:,:,0:3], 1.0/16.0)
grad_error3 = cal_grad_error(flow3, image1_3[:,:,:,0:3], 1.0/8.0)
grad_error2 = cal_grad_error(flow2, image1_2[:,:,:,0:3], 1.0/4.0)
img_grad_error6 = img_grad_error(image1_6p, image1_6, occu_mask_6, 1.0)
img_grad_error5 = img_grad_error(image1_5p, image1_5, occu_mask_5, 1.0)
img_grad_error4 = img_grad_error(image1_4p, image1_4, occu_mask_4, 1.0)
img_grad_error3 = img_grad_error(image1_3p, image1_3, occu_mask_3, 1.0)
img_grad_error2 = img_grad_error(image1_2p, image1_2, occu_mask_2, 1.0)
# feature1_6_norm = tf.nn.l2_normalize(feature1[4], dim=3)
# feature1_6p = transformer(tf.nn.l2_normalize(feature2[4], dim=3), 20*flow6/64.0, [H/64, W/64], feature1_6_norm)
# loss6f = mean_charb_error_wmask(feature1_6_norm, feature1_6p, occu_mask_6, 10.0)
#
# feature1_5_norm = tf.nn.l2_normalize(feature1[3], dim=3)
# feature1_5p = transformer(tf.nn.l2_normalize(feature2[3], dim=3), 20*flow5/32.0, [H/32, W/32], feature1_5_norm)
# loss5f = mean_charb_error_wmask(feature1_5_norm, feature1_5p, occu_mask_5, 10.0)
#
# #feature1_5p = transformer_old(feature2[3], 20*flow5/32.0, [H/32, W/32])
# # with tf.variable_scope(scope, reuse=True):
# # image1_recon = decoder(feature1_6p, reuse_scope=True, trainable=True)
    # #image1_recon2 = decoder(feature1_5p, reuse_scope=True, trainable=True, level=5)
#
# loss_ae = mean_charb_error(image1_recon, image1, 1.0) + mean_charb_error(image2, image2_recon, 1.0) + loss5f + loss6f
#
# summaries.append(tf.summary.scalar(prefix + '_loss_ae', loss_ae))
# summaries.append(tf.summary.scalar(prefix + '_loss6f', loss6f))
# summaries.append(tf.summary.scalar(prefix + '_loss5f', loss5f))
# loss = 0.05*(loss2+img_grad_error2) + 0.1*(loss3+img_grad_error3) + \
# 0.2*(loss4+img_grad_error4) + 0.8*(loss5+img_grad_error5) + 3.2*(loss6+img_grad_error6) + \
# (0.05*grad_error2 + 0.1*grad_error3 + 0.2*grad_error4 + 0.0*grad_error5 + 0.0*grad_error6)*10.0
loss = 1.0*(loss2+img_grad_error2) + 1.0*(loss3+img_grad_error3) + \
1.0*(loss4+img_grad_error4) + 1.0*(loss5+img_grad_error5) + 1.0*(loss6+img_grad_error6) + \
(1.0*grad_error2 + 1.0*grad_error3 + 1.0*grad_error4 + 1.0*grad_error5 + 1.0*grad_error6)*10.0
# loss = 3.2*(loss2+img_grad_error2) + 0.8*(loss3+img_grad_error3) + \
# 0.2*(loss4+img_grad_error4) + 0.1*(loss5+img_grad_error5) + 0.05*(loss6+img_grad_error6) + \
# (3.2*grad_error2 + 0.8*grad_error3 + 0.2*grad_error4 + 0.1*grad_error5 + 0.05*grad_error6)*10.0
self.loss = loss
summaries.append(tf.summary.scalar(prefix + '_loss', self.loss))
summaries.append(tf.summary.scalar(prefix + '_loss2', loss2))
summaries.append(tf.summary.scalar(prefix + '_loss3', loss3))
summaries.append(tf.summary.scalar(prefix + '_loss4', loss4))
summaries.append(tf.summary.scalar(prefix + '_loss5', loss5))
summaries.append(tf.summary.scalar(prefix + '_loss6', loss6))
summaries.append(tf.summary.scalar(prefix + '_grad_loss2', grad_error2))
summaries.append(tf.summary.scalar(prefix + '_grad_loss3', grad_error3))
summaries.append(tf.summary.scalar(prefix + '_grad_loss4', grad_error4))
summaries.append(tf.summary.scalar(prefix + '_grad_loss5', grad_error5))
summaries.append(tf.summary.scalar(prefix + '_grad_loss6', grad_error6))
self.summ_op = tf.summary.merge(summaries)
class Model_eval(object):
def __init__(self,
image1=None,
image2=None,
scene=None,
image_no=None,
scope=None,
prefix="eval"):
#self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
self.scene = scene
self.image_no = image_no
batch_size, H, W, color_channels = map(int, image1.get_shape()[0:4])
image1_pyrimad = get_pyrimad(get_channel(image1))
image2_pyrimad = get_pyrimad(get_channel(image2))
image1_2, image1_3, image1_4, image1_5, image1_6 = image1_pyrimad
image2_2, image2_3, image2_4, image2_5, image2_6 = image2_pyrimad
with tf.variable_scope(scope, reuse=True):
flow2, flow3, flow4, flow5, flow6, image1_trans = construct_model(image1, image2, image1_pyrimad, image2_pyrimad)
with tf.variable_scope(scope, reuse=True):
flow2r, flow3r, flow4r, flow5r, flow6r, _ = construct_model(image2, image1, image2_pyrimad, image1_pyrimad)
image1_2p, image1_3p, image1_4p, image1_5p, image1_6p = image1_trans
occu_mask_2 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/4, W/4, 1], dtype='float32'),
20*flow2r/4.0, [H/4, W/4]),
clip_value_min=0.0, clip_value_max=1.0)
# with tf.variable_scope(scope, reuse=True):
# image2_recon, feature2 = autoencoder(image2, reuse_scope=True, trainable=False)
#
# feature1_6p = transformer_old(feature2[4], 20*flow6/64.0, [H/64, W/64])
# with tf.variable_scope(scope, reuse=True):
# image1_recon = decoder(feature1_6p, reuse_scope=True, trainable=False)
#feature1_5p = transformer_old(feature2[3], 20*flow5/32.0, [H/32, W/32])
#image1_recon2 = decoder(feature1_5p, reuse_scope=True, trainable=True, level=5)
# loss_ae = mean_charb_error(image1, image1_recon, 1.0) + mean_charb_error(image2, image2_recon, 1.0)
# self.image_ae = [image1, image2, image1_recon, image2_recon]
# summaries.append(tf.summary.scalar(prefix + '_loss_ae', loss_ae))
self.flow2_scale = tf.image.resize_bicubic(20*tf.concat([flow2[:,:,:,0:1], flow2[:,:,:,1:2]/448.0*436.0], axis=3), [436, 1024])
def plot_all(model, itr, sess, feed_dict):
orig_image1, true_flo, pred_flo, true_warp, pred_warp, pred_flo_r, occu_mask, occu_mask_test, small_scales = sess.run([model.orig_image1,
model.true_flo,
model.pred_flo,
model.true_warp,
model.pred_warp,
model.pred_flo_r,
model.occu_mask,
model.occu_mask_test,
model.small_scales],
feed_dict)
plot_flo_learn_symm(orig_image1, true_flo, pred_flo, true_warp, pred_warp,
pred_flo_r, occu_mask, occu_mask_test, output_dir=FLAGS.output_dir, itr=itr)
plot_general(small_scales, h=6, w=3, output_dir=FLAGS.output_dir, itr=itr, suffix="small")
#plot_general(image_ae, h=2, w=2, output_dir=FLAGS.output_dir, itr=itr, suffix="ae")
def main(unused_argv):
if FLAGS.output_dir == "":
raise Exception("OUT_DIR must be specified")
if os.path.exists(FLAGS.output_dir):
raise Exception("OUT_DIR already exist")
print 'Constructing models and inputs.'
with tf.Graph().as_default(), tf.device('/cpu:0'):
train_op = tf.train.AdamOptimizer(FLAGS.learning_rate)
tower_grads = []
itr_placeholders = []
    image1, image2 = build_tfrecord_input(training=True)
split_image1 = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image1)
split_image2 = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image2)
eval_image1, eval_image2, scenes, image_no = build_tfrecord_input(training=False, num_epochs=1)
summaries_cpu = tf.get_collection(tf.GraphKeys.SUMMARIES, tf.get_variable_scope().name)
with tf.variable_scope(tf.get_variable_scope()) as vs:
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
if i == FLAGS.num_gpus - 1:
scopename = "model"
else:
scopename = '%s_%d' % ("tower", i)
with tf.name_scope(scopename) as ns:
if i == 0:
model = Model(split_image1[i], split_image2[i], reuse_scope=False, scope=vs)
else:
model = Model(split_image1[i], split_image2[i], reuse_scope=True, scope=vs)
loss = model.loss
# Retain the summaries from the final tower.
if i == FLAGS.num_gpus - 1:
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, ns)
eval_model = Model_eval(eval_image1, eval_image2, scenes, image_no, scope=vs)
            # Calculate the gradients for the batch of data on this tower.
grads = train_op.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
itr_placeholders.append(model.iter_num)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Apply the gradients to adjust the shared variables.
apply_gradient_op = train_op.apply_gradients(grads)
# Create a saver.
saver = tf.train.Saver(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=5)
# saver1 = tf.train.Saver(
# list(set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))-set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=".*ae.*"))), max_to_keep=5)
#
# saver2 = tf.train.Saver(
# tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=".*ae.*"), max_to_keep=5)
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries + summaries_cpu)
# Make training session.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False))
summary_writer = tf.summary.FileWriter(
FLAGS.output_dir, graph=sess.graph, flush_secs=10)
if FLAGS.pretrained_model:
saver.restore(sess, FLAGS.pretrained_model)
#saver2.restore(sess, "./tmp/flow_exp/flow_learn_chair_copy_ae_bal/model65002")
#sess.run(tf.initialize_variables(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=".*ae.*")))
#start_itr = int(FLAGS.pretrained_model.split("/")[-1][5:])
start_itr = 0
sess.run(tf.local_variables_initializer())
else:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
start_itr = 0
tf.train.start_queue_runners(sess)
  # Run inference: this script only evaluates the model and writes out flow predictions.
for itr in range(start_itr, FLAGS.num_iterations):
    # Feed the current iteration number to the model placeholders.
feed_dict = {x:np.float32(itr) for x in itr_placeholders}
flow2_scale, scene, image_no = sess.run([eval_model.flow2_scale, eval_model.scene, eval_model.image_no])
for i in range(len(flow2_scale)):
if not os.path.exists(os.path.join(FLAGS.output_dir, scene[i][0])):
os.makedirs(os.path.join(FLAGS.output_dir, scene[i][0]))
write_flow(flow2_scale[i], os.path.join(FLAGS.output_dir, scene[i][0], image_no[i][0] + ".flo"))
if __name__ == '__main__':
app.run()
|
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module to for running Cirq objects."""
import collections
import numpy as np
import cirq
# TODO (#563): Remove this workaround class once cirq.PauliSumCollector can
# be used end to end with engine. The current issue is that
# cirq.PauliSumCollector does not produce serializable gates for basis
# conversion.
class TFQPauliSumCollector(cirq.work.collector.Collector):
"""Copy of cirq.PauliSumCollector with some fixes to work with engine."""
def __init__(self,
circuit,
observable,
*,
samples_per_term,
max_samples_per_job=1000000):
observable = cirq.PauliSum.wrap(observable)
self._circuit = circuit
self._samples_per_job = max_samples_per_job
self._pauli_coef_terms = [
(p / p.coefficient, p.coefficient) for p in observable if p
]
self._identity_offset = 0
for p in observable:
if not p:
self._identity_offset += p.coefficient
self._zeros = collections.defaultdict(lambda: 0)
self._ones = collections.defaultdict(lambda: 0)
self._samples_per_term = samples_per_term
self._total_samples_requested = 0
def next_job(self):
"""Get the next job."""
i = self._total_samples_requested // self._samples_per_term
if i >= len(self._pauli_coef_terms):
return None
pauli, _ = self._pauli_coef_terms[i]
remaining = self._samples_per_term * (i +
1) - self._total_samples_requested
amount_to_request = min(remaining, self._samples_per_job)
self._total_samples_requested += amount_to_request
return cirq.work.collector.CircuitSampleJob(
circuit=_fixed_circuit_plus_pauli_string_measurements(
self._circuit, pauli),
repetitions=amount_to_request,
tag=pauli)
def on_job_result(self, job, result):
"""Post process the `job` and `result` you have."""
job_id = job.tag
parities = result.histogram(key='out',
fold_func=lambda bits: np.sum(bits) % 2)
self._zeros[job_id] += parities[0]
self._ones[job_id] += parities[1]
def collect(self, sampler):
"""Synchronus collect."""
# See #562, this is a workaround to an event loop issue in the tutorials
# see also:
# https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
while True:
next_job = self.next_job()
if next_job is None:
return
bitstrings = sampler.run(next_job.circuit,
repetitions=next_job.repetitions)
self.on_job_result(next_job, bitstrings)
def estimated_energy(self):
"""Sums up the sampled expectations, weighted by their coefficients."""
energy = 0j
for pauli_string, coef in self._pauli_coef_terms:
a = self._zeros[pauli_string]
b = self._ones[pauli_string]
if a + b:
energy += coef * (a - b) / (a + b)
energy = complex(energy)
if energy.imag == 0:
energy = energy.real
energy += self._identity_offset
return energy
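# Worked example for estimated_energy (numbers are illustrative): if a term
# with coefficient 0.5 observed 600 even-parity and 400 odd-parity samples,
# it contributes 0.5 * (600 - 400) / 1000 = 0.1, and the identity offset is
# added on top of the weighted sum.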
def _fixed_circuit_plus_pauli_string_measurements(circuit, pauli_string):
"""A circuit measuring the given observable at the end of the given circuit.
"""
assert pauli_string
circuit = circuit.copy()
# Uses cirq.SingleQubitCliffordGates which aren't serializable by engine in
# cirq 0.6. This is a workaround until fixed.
# circuit.append(cirq.Moment(pauli_string.to_z_basis_ops()))
circuit.append(cirq.Moment(cirq.decompose(pauli_string.to_z_basis_ops())))
circuit.append(
cirq.Moment([cirq.measure(*sorted(pauli_string.keys()), key='out')]))
return circuit
def _validate_inputs(circuits, param_resolvers, simulator, sim_type):
"""Type check and sanity check inputs."""
if not isinstance(circuits, (list, tuple, np.ndarray)):
raise TypeError('circuits must be a list or array.'
' Given: {}'.format(type(circuits)))
if any(not isinstance(x, cirq.Circuit) for x in circuits):
raise TypeError('circuits must contain cirq.Circuit objects')
if not isinstance(param_resolvers, (list, tuple, np.ndarray)):
raise TypeError('param_resolvers must be a list or array.'
' Given: {}'.format(type(param_resolvers)))
if any(not isinstance(x, cirq.ParamResolver) for x in param_resolvers):
raise TypeError('param_resolvers must contain cirq.ParamResolvers.')
    if len(circuits) != len(param_resolvers):
        raise ValueError('Circuit batch size does not match resolve batch '
                         'size.')
if sim_type == 'analytic':
if not isinstance(simulator, cirq.SimulatesFinalState):
raise TypeError('For analytic operations only'
' cirq.SimulatesFinalState'
' is required. Given: {}'.format(type(simulator)))
elif sim_type == 'expectation':
if not isinstance(simulator,
(cirq.sim.simulator.SimulatesExpectationValues,
cirq.DensityMatrixSimulator)):
# TODO(zaqqwerty): remove DM sim check once cirq #3964 is resolved.
            raise TypeError('For expectation operations a '
                            'cirq.sim.simulator.SimulatesExpectationValues '
                            'or cirq.DensityMatrixSimulator '
                            'is required. Given: {}'.format(type(simulator)))
elif sim_type == 'sample':
if not isinstance(simulator, cirq.Sampler):
raise TypeError('For sample based operations a cirq.Sampler is '
'required. Given: {}'.format(type(simulator)))
else:
raise ValueError('Invalid simulator type specified.')
def _check_empty(circuits):
"""Returns true if circuits is the empty tensor."""
return len(circuits) == 0
def batch_calculate_state(circuits, param_resolvers, simulator):
"""Compute states from a batch of circuits.
Returns a NumPy array containing the final circuit state for each
`cirq.Circuit` in `circuits`, given that the corresponding
`cirq.ParamResolver` in `param_resolvers` was used to resolve any symbols
in it. If simulator is a `cirq.DensityMatrixSimulator` this final state will
be a density matrix, else this final state will be a state vector. More
specifically, for a given `i`, `batch_calculate_state` will use
`param_resolvers[i]` to resolve the symbols in `circuits[i]` and then place
the final state in the return list at index `i`.
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
simulator: Simulator object. Can be any `cirq.SimulatesFinalState`;
if `simulator` is not a `cirq.DensityMatrixSimulator`, this function
assumes all final states are dense state vectors.
Returns:
`np.ndarray` containing the resulting state information. In the case of
`cirq.DensityMatrixSimulator` the shape is
[len(circuits), <size of biggest state>, <size of biggest state>], else
the shape is [len(circuits), <size of biggest state>].
"""
_validate_inputs(circuits, param_resolvers, simulator, 'analytic')
if _check_empty(circuits):
empty_ret = np.zeros((0, 0), dtype=np.complex64)
if isinstance(simulator, cirq.DensityMatrixSimulator):
empty_ret = np.zeros((0, 0, 0), dtype=np.complex64)
return empty_ret
biggest_circuit = max(len(circuit.all_qubits()) for circuit in circuits)
    # Default to state vector unless we see a density matrix simulator.
return_mem_shape = (len(circuits), 1 << biggest_circuit)
post_process = lambda x: x.final_state_vector
if isinstance(simulator, cirq.DensityMatrixSimulator):
return_mem_shape = (len(circuits), 1 << biggest_circuit,
1 << biggest_circuit)
post_process = lambda x: x.final_density_matrix
batch_states = np.ones(return_mem_shape, dtype=np.complex64) * -2
for index, (program, param) in enumerate(zip(circuits, param_resolvers)):
result = simulator.simulate(program, param)
state_size = 1 << len(program.all_qubits())
state = post_process(result).astype(np.complex64)
sub_index = (slice(None, state_size, 1),) * (batch_states.ndim - 1)
batch_states[index][sub_index] = state
return batch_states
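# Minimal usage sketch (illustrative only, not part of the original module):
# simulate one parameter-free circuit and read back its state vector. With a
# state-vector simulator the result at index 0 is the Bell state
# (|00> + |11>)/sqrt(2).
def _example_batch_calculate_state():
    qubits = cirq.LineQubit.range(2)
    circuits = [cirq.Circuit([cirq.H(qubits[0]), cirq.CNOT(qubits[0], qubits[1])])]
    resolvers = [cirq.ParamResolver({})]
    return batch_calculate_state(circuits, resolvers, cirq.Simulator())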
def batch_calculate_expectation(circuits, param_resolvers, ops, simulator):
"""Compute expectations from a batch of circuits.
Returns a `np.ndarray` containing the expectation values of `ops`
applied to a specific circuit in `circuits`, given that the
corresponding `cirq.ParamResolver` in `param_resolvers` was used to resolve
any symbols in the circuit. Specifically the returned array at index `i,j`
will be equal to the expectation value of `ops[i][j]` on `circuits[i]` with
`param_resolvers[i]` used to resolve any symbols in `circuits[i]`.
Expectation calculations will be carried out using the simulator object.
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
ops: 2d Python `list` of `cirq.PauliSum` objects where `ops[i][j]` will
be used to calculate the expectation on `circuits[i]` for all `j`,
after `param_resolver[i]` is used to resolve any parameters
in the circuit.
simulator: Simulator object. Must inherit
`cirq.sim.simulator.SimulatesExpectationValues` or
`cirq.DensityMatrixSimulator`.
Returns:
`np.ndarray` containing the expectation values. Shape is:
[len(circuits), len(ops[0])]
"""
_validate_inputs(circuits, param_resolvers, simulator, 'expectation')
if _check_empty(circuits):
return np.zeros((0, 0), dtype=np.float32)
if not isinstance(ops, (list, tuple, np.ndarray)):
raise TypeError('ops must be a list or array.'
' Given: {}'.format(type(ops)))
if len(ops) != len(circuits):
raise ValueError('Shape of ops and circuits do not match.')
for sub_list in ops:
if not isinstance(sub_list, (list, tuple, np.ndarray)):
raise TypeError('elements of ops must be type list.')
for x in sub_list:
if not isinstance(x, cirq.PauliSum):
raise TypeError('ops must contain only cirq.PauliSum objects.'
' Given: {}'.format(type(x)))
all_exp_vals = np.ones(shape=(len(circuits), len(ops[0])),
dtype=np.float32) * -2
for i, (c, p, op_row) in enumerate(zip(circuits, param_resolvers, ops)):
# Convention in TFQ is to set expectations of empty circuits to -2.
if len(c) == 0:
continue
# TODO(zaqqwerty): remove DM sim check once cirq #3964 is resolved.
if isinstance(simulator, cirq.DensityMatrixSimulator):
qubits = c.all_qubits()
pairs = zip(sorted(qubits), list(range(len(qubits))))
qubit_order = dict(pairs)
sim_result = simulator.simulate(c, p)
for j, op in enumerate(op_row):
dm = sim_result.final_density_matrix
all_exp_vals[i][j] = op.expectation_from_density_matrix(
dm, qubit_order, check_preconditions=False)
else:
# Valid observables always have real expectation values.
all_exp_vals[i] = np.real(
np.asarray(simulator.simulate_expectation_values(c, op_row, p)))
return all_exp_vals
def batch_calculate_sampled_expectation(circuits, param_resolvers, ops,
n_samples, sampler):
"""Compute expectations from sampling a batch of circuits.
Returns a `np.ndarray` containing the expectation values of `ops`
applied to a specific circuit in `circuits`, given that the
corresponding `cirq.ParamResolver` in `param_resolvers` was used to resolve
any symbols in the circuit. Specifically the returned array at index `i,j`
will be equal to the expectation value of `ops[i][j]` on `circuits[i]` with
`param_resolvers[i]` used to resolve any symbols in `circuits[i]`.
Expectation estimations will be carried out using the sampler object.
Expectations for ops[i][j] are estimated by drawing n_samples[i][j]
samples.
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
ops: 2d Python `list` of `cirq.PauliSum` objects where `ops[i][j]` will
be used to calculate the expectation on `circuits[i]` for all `j`,
after `param_resolver[i]` is used to resolve any parameters
in the circuit.
n_samples: 2d Python `list` of `int`s where `n_samples[i][j]` is
equal to the number of samples to draw in each term of `ops[i][j]`
when estimating the expectation.
sampler: Anything inheriting `cirq.Sampler`.
Returns:
`np.ndarray` containing the expectation values. Shape is:
[len(circuits), len(ops[0])]
"""
_validate_inputs(circuits, param_resolvers, sampler, 'sample')
if _check_empty(circuits):
return np.zeros((0, 0), dtype=np.float32)
if not isinstance(ops, (list, tuple, np.ndarray)):
raise TypeError('ops must be a list or array.'
' Given: {}'.format(type(ops)))
if len(ops) != len(circuits):
raise ValueError('Shape of ops and circuits do not match.')
if len(n_samples) != len(circuits):
raise ValueError('Shape of n_samples does not match circuits.')
for sub_list in n_samples:
if not isinstance(sub_list, (list, tuple, np.ndarray)):
            raise TypeError('Elements of n_samples must be lists of ints.')
for x in sub_list:
if not isinstance(x, int):
raise TypeError('Non-integer value found in n_samples.')
if x <= 0:
raise ValueError('n_samples contains sample value <= 0.')
for sub_list in ops:
if not isinstance(sub_list, (list, tuple, np.ndarray)):
raise TypeError('elements of ops must be type list.')
for x in sub_list:
if not isinstance(x, cirq.PauliSum):
raise TypeError('ops must contain only cirq.PauliSum objects.'
' Given: {}'.format(type(x)))
all_exp_vals = np.full((len(circuits), len(ops[0])), -2, dtype=np.float32)
for c_index, (c, params) in enumerate(zip(circuits, param_resolvers)):
# (#679) Just ignore empty programs.
if len(c.all_qubits()) == 0:
continue
circuit = cirq.resolve_parameters(c, params)
for op_index, op in enumerate(ops[c_index]):
collector = TFQPauliSumCollector(
circuit, op, samples_per_term=n_samples[c_index][op_index])
collector.collect(sampler)
result = collector.estimated_energy().real
all_exp_vals[c_index][op_index] = result
return all_exp_vals
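# Usage sketch (illustrative only): estimate <Z0 + Z1> on a Bell state by
# sampling 1000 shots per Pauli term; both single-qubit Z expectations vanish
# on the Bell state, so the estimate is close to 0.
def _example_batch_calculate_sampled_expectation():
    qubits = cirq.LineQubit.range(2)
    circuits = [cirq.Circuit([cirq.H(qubits[0]), cirq.CNOT(qubits[0], qubits[1])])]
    resolvers = [cirq.ParamResolver({})]
    ops = [[cirq.Z(qubits[0]) + cirq.Z(qubits[1])]]  # a cirq.PauliSum
    n_samples = [[1000]]  # shots per term of ops[0][0]
    return batch_calculate_sampled_expectation(circuits, resolvers, ops,
                                               n_samples, cirq.Simulator())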
def batch_sample(circuits, param_resolvers, n_samples, simulator):
"""Sample from circuits.
Returns a `np.ndarray` containing n_samples samples from all the circuits in
circuits given that the corresponding `cirq.ParamResolver` in
`param_resolvers` was used to resolve any symbols. Specifically the
returned array at index `i,j` will correspond to a `np.ndarray` of
booleans representing bitstring `j` that was sampled from `circuits[i]`.
Samples are drawn using the provided simulator object (Currently supported
are `cirq.DensityMatrixSimulator` and `cirq.Simulator`).
Note: In order to keep numpy shape consistent, smaller circuits will
have sample bitstrings padded with -2 on "qubits that don't exist
in the circuit".
Args:
circuits: Python `list` of `cirq.Circuit`s.
param_resolvers: Python `list` of `cirq.ParamResolver`s, where
`param_resolvers[i]` is the resolver to be used with `circuits[i]`.
n_samples: `int` describing number of samples to draw from each
circuit.
simulator: Simulator object. Currently
supported are `cirq.DensityMatrixSimulator` and `cirq.Simulator`.
Returns:
`np.ndarray` containing the samples with invalid qubits blanked out.
    Its shape is
[len(circuits), n_samples, <# qubits in largest circuit>].
circuits that are smaller than #qubits in largest circuit have null
qubits in bitstrings mapped to -2.
"""
_validate_inputs(circuits, param_resolvers, simulator, 'sample')
if _check_empty(circuits):
return np.zeros((0, 0, 0), dtype=np.int8)
if not isinstance(n_samples, int):
raise TypeError('n_samples must be an int.'
'Given: {}'.format(type(n_samples)))
if n_samples <= 0:
raise ValueError('n_samples must be > 0.')
biggest_circuit = max(len(circuit.all_qubits()) for circuit in circuits)
return_mem_shape = (len(circuits), n_samples, biggest_circuit)
return_array = np.ones(return_mem_shape, dtype=np.int8) * -2
for batch, (c, resolver) in enumerate(zip(circuits, param_resolvers)):
if len(c.all_qubits()) == 0:
continue
qb_keys = [(q, str(i)) for i, q in enumerate(sorted(c.all_qubits()))]
c_m = c + cirq.Circuit(cirq.measure(q, key=i) for q, i in qb_keys)
run_c = cirq.resolve_parameters(c_m, resolver)
bits = simulator.sample(run_c, repetitions=n_samples)
flat_m = bits[[x[1] for x in qb_keys]].to_numpy().astype(np.int8)
return_array[batch, :, biggest_circuit - len(qb_keys):] = flat_m
return return_array
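# Usage sketch (illustrative only): draw 10 bitstrings from a two-qubit Bell
# circuit; the returned array has shape [1, 10, 2], and in a mixed batch any
# smaller circuit would have its missing qubits padded with -2.
def _example_batch_sample():
    qubits = cirq.LineQubit.range(2)
    circuits = [cirq.Circuit([cirq.H(qubits[0]), cirq.CNOT(qubits[0], qubits[1])])]
    resolvers = [cirq.ParamResolver({})]
    return batch_sample(circuits, resolvers, 10, cirq.Simulator())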
|
|
import numpy as np
import glob
import sys
import random
class record_fetcher(object):
'''
creates feature arrays and labels from raw accelerometer/demographic data
no test/train splitting is included
methods:
- fetch(batch_size,minibatch_size,binary)
calculates summary statistics from raw accelerometer/demographic data and creates
input features and labels for lstm classifier
parameters:
- batch_size: integer
number of frames to use for each train/test instance
e.g. 1000 means each test/train instance represents 10 seconds of data
- minibatch_size: integer
number of frames to use for each set of summary statistics
e.g. 50 will calculate summary statistics over .5 second windows across each train/test instance
- binary: boolean (default True)
use True to set labels for ambulatory/non-ambulatory
use False to set labels for non-ambulatory/walking/running/upstairs/downstairs
outputs:
- numpy array representing summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing activity label over each time window
'''
def __init__(self):
#categorize activity ids into ambulatory/non-ambulatory
self.dic1 = {
'ambulatory': [11,12,13,14,23,24,25,26,27,28,29,30,31,32,16,17,18,33,34],
'nonambulatory': [19,20,21,22]
}
#categorize activity ids into non-ambulatory/walking/running/upstairs/downstairs
self.dic2 = {
'nonambulatory': [19,20,21,22],
'walking': [11,12,13,14,23,24,25,26,27,28,29,30,31,32],
'running': [16,17,18],
'upstairs': [33],
'downstairs': [34]
}
#get filenames for all activity arrays
self.ambulatory = []
for i in self.dic1['ambulatory']:
self.ambulatory.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.nonambulatory = []
for i in self.dic1['nonambulatory']:
self.nonambulatory.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.walking = []
for i in self.dic2['walking']:
self.walking.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.running = []
for i in self.dic2['running']:
self.running.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.upstairs = []
for i in self.dic2['upstairs']:
self.upstairs.extend(glob.glob('../data/arrays/*_%i_*' % i))
self.downstairs = []
for i in self.dic2['downstairs']:
self.downstairs.extend(glob.glob('../data/arrays/*_%i_*' % i))
def fetch(self,batch_size,minibatch_size,binary=True):
'''
calculates summary statistics from raw accelerometer/demographic data and creates
input features and labels for lstm classifier
parameters:
- batch_size: integer
number of frames to use for each train/test instance
e.g. 1000 means each test/train instance represents 10 seconds of data
- minibatch_size: integer
number of frames to use for each set of summary statistics
e.g. 50 will calculate summary statistics over .5 second windows across each train/test instance
- binary: boolean (default True)
use True to set labels for ambulatory/non-ambulatory
use False to set labels for non-ambulatory/walking/running/upstairs/downstairs
outputs:
- numpy array representing summary statistics and demographic data over time
dimension 0 is the index of the batch window
dimension 1 is the index of the minibatch window
                dimension 2 is the summary statistics and demographic data over each minibatch window
- numpy array representing activity label over each time window
'''
X_list = []
y_list = []
batches = batch_size//minibatch_size
        #map each group of activity files to its one-hot label
        if binary:
            #ambulatory/non-ambulatory classification
            groups = [(self.ambulatory, np.array([0,1])),
                      (self.nonambulatory, np.array([1,0]))]
        else:
            #non-ambulatory/walking/running/upstairs/downstairs classification
            groups = [(self.nonambulatory, np.array([1,0,0,0,0])),
                      (self.walking, np.array([0,1,0,0,0])),
                      (self.running, np.array([0,0,1,0,0])),
                      (self.upstairs, np.array([0,0,0,1,0])),
                      (self.downstairs, np.array([0,0,0,0,1]))]
        for files, label in groups:
            for a in files:
                print 'processing %s' % a
                array = np.load(a)
                #skip arrays smaller than batch_size
                if array.shape[0] <= batch_size:
                    continue
                #separate array into batches
                seg = array.shape[0]//batch_size
                for i in range(seg):
                    batch = np.empty((0,109))
                    #separate each batch into minibatches and calculate summary statistics per minibatch
                    for j in range(batches):
                        #each minibatch window covers exactly minibatch_size frames of its batch
                        subarray = array[i*batch_size+j*minibatch_size:i*batch_size+(j+1)*minibatch_size,:]
                        features = self._create_features(subarray)
                        batch = np.concatenate((batch,features),0)
                    X_list.append(batch)
                    y_list.append(label)
#pair X/y together and shuffle
print 'shuffling records'
Xy = zip(X_list,y_list)
random.shuffle(Xy)
#separate X from y
X = np.array([record[0] for record in Xy])
y = np.array([record[1] for record in Xy])
print 'feature vector shape:', X.shape
print 'label vector shape:', y.shape
return X, y
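    # Worked example (numbers are illustrative): with batch_size=1000 and
    # minibatch_size=50 each instance holds 20 minibatches, so fetch() returns
    # X with shape (n_instances, 20, 109) and, for binary=True, y with shape
    # (n_instances, 2).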
def _create_features(self,array):
'''
calculate summary statistics over time window
concatenate with normalized demographic data
the following features are calculated for each axis (X,Y,Z),
magnitude (sqrt of X^2+Y^2+Z^2), first differential of each axis,
and first differential of magnitude:
- mean, std, min, max
- 10,25,50,75,90 percentiles
- number of median crossings
- correlation with other axis
'''
#create features
mag = np.sqrt(array[:,0]**2+array[:,1]**2+array[:,2]**2)
x_mean = np.mean(array[:,0])
y_mean = np.mean(array[:,1])
z_mean = np.mean(array[:,2])
mag_mean = np.mean(mag)
x_std = np.std(array[:,0])
y_std = np.std(array[:,1])
z_std = np.std(array[:,2])
mag_std = np.std(mag)
x_10per = np.percentile(array[:,0],10)
x_25per = np.percentile(array[:,0],25)
x_50per = np.percentile(array[:,0],50)
x_75per = np.percentile(array[:,0],75)
x_90per = np.percentile(array[:,0],90)
x_med = np.median(array[:,0])
        x_medcross = np.sum(np.diff((array[:,0] > x_med).astype(int)) != 0)  #count crossings of the median
x_max = np.amax(array[:,0])
x_min = np.amin(array[:,0])
x_range = x_max - x_min
x_iqrange = x_75per - x_25per
y_10per = np.percentile(array[:,1],10)
y_25per = np.percentile(array[:,1],25)
y_50per = np.percentile(array[:,1],50)
y_75per = np.percentile(array[:,1],75)
y_90per = np.percentile(array[:,1],90)
y_med = np.median(array[:,1])
        y_medcross = np.sum(np.diff((array[:,1] > y_med).astype(int)) != 0)
y_max = np.amax(array[:,1])
y_min = np.amin(array[:,1])
y_range = y_max - y_min
y_iqrange = y_75per - y_25per
z_10per = np.percentile(array[:,2],10)
z_25per = np.percentile(array[:,2],25)
z_50per = np.percentile(array[:,2],50)
z_75per = np.percentile(array[:,2],75)
z_90per = np.percentile(array[:,2],90)
z_med = np.median(array[:,2])
        z_medcross = np.sum(np.diff((array[:,2] > z_med).astype(int)) != 0)
z_max = np.amax(array[:,2])
z_min = np.amin(array[:,2])
z_range = z_max - z_min
z_iqrange = z_75per - z_25per
mag_10per = np.percentile(mag,10)
mag_25per = np.percentile(mag,25)
mag_50per = np.percentile(mag,50)
mag_75per = np.percentile(mag,75)
mag_90per = np.percentile(mag,90)
mag_med = np.median(mag)
        mag_medcross = np.sum(np.diff((mag > mag_med).astype(int)) != 0)
mag_max = np.amax(mag)
mag_min = np.amin(mag)
mag_range = mag_max - mag_min
mag_iqrange = mag_75per - mag_25per
        xy_corr = np.corrcoef(array[:,0],array[:,1])[0,1] #Pearson correlation coefficient (a scalar)
        xz_corr = np.corrcoef(array[:,0],array[:,2])[0,1]
        yz_corr = np.corrcoef(array[:,1],array[:,2])[0,1]
x_d1 = np.diff(array[:,0])
y_d1 = np.diff(array[:,1])
z_d1 = np.diff(array[:,2])
mag_d1 = np.diff(mag)
x_d1_mean = np.mean(x_d1)
y_d1_mean = np.mean(y_d1)
z_d1_mean = np.mean(z_d1)
mag_mean_d1 = np.mean(mag_d1)
x_d1_std = np.std(x_d1)
y_d1_std = np.std(y_d1)
z_d1_std = np.std(z_d1)
mag_std_d1 = np.std(mag_d1)
x_10per_d1 = np.percentile(x_d1,10)
x_25per_d1 = np.percentile(x_d1,25)
x_50per_d1 = np.percentile(x_d1,50)
x_75per_d1 = np.percentile(x_d1,75)
x_90per_d1 = np.percentile(x_d1,90)
x_med_d1 = np.median(x_d1)
        x_medcross_d1 = np.sum(np.diff((x_d1 > x_med_d1).astype(int)) != 0)
x_max_d1 = np.amax(x_d1)
x_min_d1 = np.amin(x_d1)
x_range_d1 = x_max_d1 - x_min_d1
x_iqrange_d1 = x_75per_d1 - x_25per_d1
y_10per_d1 = np.percentile(y_d1,10)
y_25per_d1 = np.percentile(y_d1,25)
y_50per_d1 = np.percentile(y_d1,50)
y_75per_d1 = np.percentile(y_d1,75)
y_90per_d1 = np.percentile(y_d1,90)
y_med_d1 = np.median(y_d1)
        y_medcross_d1 = np.sum(np.diff((y_d1 > y_med_d1).astype(int)) != 0)
y_max_d1 = np.amax(y_d1)
y_min_d1 = np.amin(y_d1)
y_range_d1 = y_max_d1 - y_min_d1
y_iqrange_d1 = y_75per_d1 - y_25per_d1
z_10per_d1 = np.percentile(z_d1,10)
z_25per_d1 = np.percentile(z_d1,25)
z_50per_d1 = np.percentile(z_d1,50)
z_75per_d1 = np.percentile(z_d1,75)
z_90per_d1 = np.percentile(z_d1,90)
z_med_d1 = np.median(z_d1)
        z_medcross_d1 = np.sum(np.diff((z_d1 > z_med_d1).astype(int)) != 0)
z_max_d1 = np.amax(z_d1)
z_min_d1 = np.amin(z_d1)
z_range_d1 = z_max_d1 - z_min_d1
z_iqrange_d1 = z_75per_d1 - z_25per_d1
mag_10per_d1 = np.percentile(mag_d1,10)
mag_25per_d1 = np.percentile(mag_d1,25)
mag_50per_d1 = np.percentile(mag_d1,50)
mag_75per_d1 = np.percentile(mag_d1,75)
mag_90per_d1 = np.percentile(mag_d1,90)
mag_med_d1 = np.median(mag_d1)
        mag_medcross_d1 = np.sum(np.diff((mag_d1 > mag_med_d1).astype(int)) != 0)
mag_max_d1 = np.amax(mag_d1)
mag_min_d1 = np.amin(mag_d1)
mag_range_d1 = mag_max_d1 - mag_min_d1
mag_iqrange_d1 = mag_75per_d1 - mag_25per_d1
        xy_corr_d1 = np.corrcoef(x_d1,y_d1)[0,1]
        xz_corr_d1 = np.corrcoef(x_d1,z_d1)[0,1]
        yz_corr_d1 = np.corrcoef(y_d1,z_d1)[0,1]
#concatenate all features
        features = np.array([x_mean,y_mean,z_mean,x_std,y_std,z_std,xy_corr,xz_corr,yz_corr,\
x_10per,x_25per,x_50per,x_75per,x_90per,x_max,x_min,x_medcross,x_range,x_iqrange,\
y_10per,y_25per,y_50per,y_75per,y_90per,y_max,y_min,y_medcross,y_range,y_iqrange,\
z_10per,z_25per,z_50per,z_75per,z_90per,z_max,z_min,z_medcross,z_range,z_iqrange,\
mag_mean,mag_std,mag_10per,mag_25per,mag_50per,mag_75per,mag_90per,mag_max,mag_min,mag_medcross,mag_range,mag_iqrange,\
x_d1_mean,y_d1_mean,z_d1_mean,x_d1_std,y_d1_std,z_d1_std,xy_corr_d1,xz_corr_d1,yz_corr_d1,\
x_10per_d1,x_25per_d1,x_50per_d1,x_75per_d1,x_90per_d1,x_max_d1,x_min_d1,x_medcross_d1,x_range_d1,x_iqrange_d1,\
y_10per_d1,y_25per_d1,y_50per_d1,y_75per_d1,y_90per_d1,y_max_d1,y_min_d1,y_medcross_d1,y_range_d1,y_iqrange_d1,\
z_10per_d1,z_25per_d1,z_50per_d1,z_75per_d1,z_90per_d1,z_max_d1,z_min_d1,z_medcross_d1,z_range_d1,z_iqrange_d1,\
mag_mean_d1,mag_std_d1,mag_10per_d1,mag_25per_d1,mag_50per_d1,mag_75per_d1,mag_90per_d1,mag_max_d1,mag_min_d1,mag_medcross_d1,mag_range_d1,mag_iqrange_d1])
features = np.concatenate((features,array[0,3:]))
features = np.expand_dims(features, axis=0)
return features
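    # Example (hypothetical shapes): for a raw window shaped
    # (minibatch_size, 10) -- three accelerometer axes followed by
    # demographic columns -- _create_features returns a single (1, 109) row
    # ready to be stacked into a batch.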
if __name__ == "__main__":
# verify the required arguments are given
if (len(sys.argv) < 2):
print 'Usage: python record_fetcher_within_subject.py <1 for 2-category labels, 0 for 5-category labels>'
exit(1)
if sys.argv[1] == '1':
binary = True
elif sys.argv[1] == '0':
binary = False
else:
print 'Usage: python record_fetcher_within_subject.py <1 for 2-category labels, 0 for 5-category labels>'
exit(1)
rf = record_fetcher()
X,y = rf.fetch(1000,50,binary=binary)
np.save('X',X)
np.save('y',y)
|
|
class SASTask:
    def __init__(self, variables, init, goal, operators,
                 temp_operators, axioms, num_axioms, comp_axioms):
self.variables = variables
self.init = init
self.goal = goal
self.operators = operators
self.temp_operators = temp_operators
self.axioms = axioms
self.num_axioms = num_axioms
self.comp_axioms = comp_axioms
def output(self, stream):
self.variables.output(stream)
self.init.output(stream)
self.goal.output(stream)
# print >> stream, len(self.operators)
# for op in self.operators:
# op.output(stream)
print >> stream, len(self.temp_operators)
for op in self.temp_operators:
op.output(stream)
print >> stream, len(self.axioms)
for axiom in self.axioms:
axiom.output(stream)
print >> stream, len(self.comp_axioms)
for axiom in self.comp_axioms:
axiom.output(stream)
print >> stream, len(self.num_axioms)
for axiom in self.num_axioms:
axiom.output(stream)
class SASVariables:
def __init__(self, ranges, axiom_layers):
self.ranges = ranges
self.axiom_layers = axiom_layers
def dump(self):
for var, (rang, axiom_layer) in enumerate(zip(self.ranges, self.axiom_layers)):
if axiom_layer != -1:
axiom_str = " [axiom layer %d]" % axiom_layer
else:
axiom_str = ""
print "v%d in {%s}%s" % (var, range(rang), axiom_str)
def output(self, stream):
print >> stream, "begin_variables"
print >> stream, len(self.ranges)
for var, (rang, axiom_layer) in enumerate(zip(self.ranges, self.axiom_layers)):
print >> stream, "var%d %d %d" % (var, rang, axiom_layer)
print >> stream, "end_variables"
class SASInit:
def __init__(self, values):
self.values = values
def dump(self):
for var, val in enumerate(self.values):
if val != -1:
print "v%d: %d" % (var, val)
def output(self, stream):
print >> stream, "begin_state"
for val in self.values:
print >> stream, val
print >> stream, "end_state"
class SASGoal:
def __init__(self, pairs):
self.pairs = sorted(pairs)
def dump(self):
for var, val in self.pairs:
print "v%d: %d" % (var, val)
def output(self, stream):
print >> stream, "begin_goal"
print >> stream, len(self.pairs)
for var, val in self.pairs:
print >> stream, var, val
print >> stream, "end_goal"
class SASOperator:
def __init__(self, name, prevail, pre_post, assign_effects):
self.name = name
self.prevail = sorted(prevail)
self.pre_post = sorted(pre_post)
self.assign_effects = assign_effects
def dump(self):
print self.name
print "Prevail:"
for var, val in self.prevail:
print " v%d: %d" % (var, val)
print "Pre/Post:"
for var, pre, post, cond in self.pre_post:
if cond:
cond_str = " [%s]" % ", ".join(["%d: %d" % tuple(c) for c in cond])
else:
cond_str = ""
print " v%d: %d -> %d%s" % (var, pre, post, cond_str)
def output(self, stream):
print >> stream, "begin_operator"
print >> stream, self.name[1:-1]
print >> stream, len(self.prevail)
for var, val in self.prevail:
print >> stream, var, val
num = len(self.pre_post) + len(self.assign_effects)
print >> stream, num
for var, pre, post, cond in self.pre_post:
print >> stream, len(cond),
for cvar, cval in cond:
print >> stream, cvar, cval,
print >> stream, var, pre, post
for assignment in self.assign_effects:
assignment.output(stream)
print >> stream, "end_operator"
class SASTemporalOperator:
def __init__(self, name, duration, prevail, pre_post, assign_effects):
self.name = name
        ## Currently we assume in the knowledge compilation
        ## and search that there is a single exact at-start
        ## duration constraint. If someone wants to change
        ## this, it is only necessary to adapt the output
        ## method and to remove this assertion.
assert (len(duration[1]) == 0 and len(duration[0]) == 1
and duration[0][0].op == "="), \
"unsupported duration constraint"
self.duration = duration
self.prevail = prevail
self.pre_post = pre_post
self.assign_effects = assign_effects
def output(self, stream):
print >> stream, "begin_operator"
print >> stream, self.name[1:-1]
self.duration[0][0].output(stream)
for time in range(3):
print >> stream, len(self.prevail[time])
for var, val in self.prevail[time]:
print >> stream, var, val
for time in range(2):
num = len(self.pre_post[time]) + len(self.assign_effects[time])
print >> stream, num
for var, pre, post, cond in self.pre_post[time]:
for cond_time in range(3):
print >> stream, len(cond[cond_time]),
for cvar, cval in cond[cond_time]:
print >> stream, cvar, cval,
print >> stream, var, pre, post
for assignment in self.assign_effects[time]:
assignment.output(stream)
print >> stream, "end_operator"
class SASDuration:
def __init__(self, op, var):
self.op = op
self.var = var
def output(self, stream):
print >> stream, self.op, self.var
class SASAssignmentEffect:
def __init__(self, var, op, valvar, prevail, temporal=False):
self.var = var
self.op = op
self.valvar = valvar
self.prevail = prevail
self.temporal = temporal
def output(self, stream):
if self.temporal:
for time in range(3):
print >> stream, len(self.prevail[time]),
for var, val in self.prevail[time]:
print >> stream, var, val,
else:
print >> stream, len(self.prevail),
for var, val in self.prevail:
print >> stream, var, val,
print >> stream, self.var, self.op, self.valvar
class SASAxiom:
def __init__(self, condition, effect):
self.condition = condition
self.effect = effect
assert self.effect[1] in (0, 1)
for _, val in condition:
assert val >= 0, condition
def dump(self):
print "Condition:"
for var, val in self.condition:
print " v%d: %d" % (var, val)
print "Effect:"
var, val = self.effect
print " v%d: %d" % (var, val)
def output(self, stream):
print >> stream, "begin_rule"
print >> stream, len(self.condition)
for var, val in self.condition:
print >> stream, var, val
var, val = self.effect
print >> stream, var, 1 - val, val
print >> stream, "end_rule"
class SASCompareAxiom:
def __init__(self, comp, parts, effect):
self.comp = comp
self.parts = parts
self.effect = effect
def dump(self):
values = (self.effect, self.comp,
" ".join([str(var) for var in self.parts]))
print "v%d: %s %s" % values
def output(self, stream):
values = (self.effect, self.comp,
" ".join([str(var) for var in self.parts]))
print >> stream, "%d %s %s" % values
class SASNumericAxiom:
def __init__(self, op, parts, effect):
self.op = op
self.parts = parts
self.effect = effect
def dump(self):
values = (self.effect, self.op,
" ".join([str(var) for var in self.parts]))
print "v%d: %s %s" % values
def output(self, stream):
values = (self.effect, self.op,
" ".join([str(var) for var in self.parts]))
print >> stream, "%d %s %s" % values
|
|
# -*- coding: utf-8 -*-
import asyncio
import os
import re
import signal
import pytest
from unittest import mock
from strawboss import main
@mock.patch('dotenvfile.loadfile')
def test_main_procfile_not_found(load_dotenvfile,
subprocess_factory, capfd, event_loop):
loop = asyncio.get_event_loop()
# Run the main function.
with pytest.raises(SystemExit) as exc:
main(['--procfile', './does-not-exist', '--no-env'])
assert int(str(exc.value)) == 2
# Since the Procfile does not exist, we get an error.
_, stderr = capfd.readouterr()
assert stderr.strip() == 'Procfile not found at "./does-not-exist".'
@mock.patch('dotenvfile.loadfile')
@mock.patch('procfile.loadfile')
def test_main_procfile_empty(load_procfile, load_dotenvfile,
subprocess_factory, capfd, event_loop):
loop = asyncio.get_event_loop()
load_procfile.return_value = {}
# Run the main function.
with pytest.raises(SystemExit) as exc:
main(['--no-env'])
assert int(str(exc.value)) == 2
# Since the Procfile is empty, we get an error.
_, stderr = capfd.readouterr()
assert stderr.strip() == 'Nothing to run.'
@mock.patch('dotenvfile.loadfile')
@mock.patch('procfile.loadfile')
def test_main(load_procfile, load_dotenvfile, subprocess_factory, capfd, event_loop):
load_procfile.return_value = {
'foo': {
'cmd': 'false',
'env': {},
},
}
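    # Minimal fake Procfile: a single process named 'foo' with no extra
    # environment; the subprocess_factory fixture (assumed to fake the
    # actual spawning) is what the assertions below inspect.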
# Automatically trigger CTRL-C a short while from now.
event_loop.call_later(1.0, os.kill, os.getpid(), signal.SIGINT)
# Run the main function!
main(['--no-env'])
stdout, stderr = capfd.readouterr()
# Error log should be empty.
assert stderr.strip() == ''
# Output log should contain start & stop info for each subprocess.
#
# NOTE: we strip timestamps from the logs since they don't matter here.
lines = stdout.strip().split('\n')
lines = [line.split(' ', 1)[1] for line in lines]
expected_lines = []
for p in subprocess_factory.instances:
expected_lines.extend([
'[strawboss] foo.0(%d) spawned.' % p.pid,
'[strawboss] foo.0(%d) killed.' % p.pid,
'[strawboss] foo.0(%d) completed with exit status -9.' % p.pid,
])
assert len(subprocess_factory.instances) > 0
assert set(lines) == set(expected_lines)
@mock.patch('dotenvfile.loadfile')
@mock.patch('procfile.loadfile')
def test_main_idempotent_ctrl_c(load_procfile, load_dotenvfile,
subprocess_factory, capfd, event_loop):
load_procfile.return_value = {
'foo': {
'cmd': 'false',
'env': {},
},
}
# Automatically trigger CTRL-C a short while from now.
#
# NOTE: intentionally schedule this twice to simulate the case where we get
# two CTRL-C events before we can react.
event_loop.call_later(1.0, os.kill, os.getpid(), signal.SIGINT)
event_loop.call_later(1.0, os.kill, os.getpid(), signal.SIGINT)
# Run the main function!
main(['--no-env'])
stdout, stderr = capfd.readouterr()
# Error log should be empty.
assert stderr.strip() == ''
# Output log should contain start & stop info for each subprocess.
#
# NOTE: we strip timestamps from the logs since they don't matter here.
lines = stdout.strip().split('\n')
lines = [line.split(' ', 1)[1] for line in lines]
expected_lines = []
for p in subprocess_factory.instances:
expected_lines.extend([
'[strawboss] foo.0(%d) spawned.' % p.pid,
'[strawboss] foo.0(%d) killed.' % p.pid,
'[strawboss] foo.0(%d) completed with exit status -9.' % p.pid,
])
assert len(subprocess_factory.instances) > 0
assert set(lines) == set(expected_lines)
@mock.patch('sys.argv', ['strawboss', '--no-env', '--scale=*:2'])
@mock.patch('dotenvfile.loadfile')
@mock.patch('procfile.loadfile')
def test_main_cli(load_procfile, load_dotenvfile,
subprocess_factory, capfd, event_loop):
load_procfile.return_value = {
'foo': {
'cmd': 'false',
'env': {},
},
}
# Automatically trigger CTRL-C a short while from now.
event_loop.call_later(1.0, os.kill, os.getpid(), signal.SIGINT)
# Run the main function!
main()
stdout, stderr = capfd.readouterr()
# Error log should be empty.
assert stderr.strip() == ''
# Output log should contain start & stop info for each subprocess.
#
# NOTE: we strip timestamps and PIDs from the logs since they don't matter
# here and it makes comparisons simpler.
lines = stdout.strip().split('\n')
lines = [re.sub(r'\(\d+\)', r'(?)', line.split(' ', 1)[1]) for line in lines]
expected_lines = []
for i in range(2):
expected_lines.extend([
'[strawboss] foo.%d(?) spawned.' % i,
'[strawboss] foo.%d(?) killed.' % i,
'[strawboss] foo.%d(?) completed with exit status -9.' % i,
])
print(lines)
assert len(subprocess_factory.instances) > 0
assert set(lines) == set(expected_lines)
@mock.patch('dotenvfile.loadfile')
@mock.patch('procfile.loadfile')
def test_main_envfile(load_procfile, load_dotenvfile,
subprocess_factory, capfd, event_loop):
load_procfile.return_value = {
'foo': {
'cmd': 'false',
'env': {
'ENV1': 'bar',
},
},
}
load_dotenvfile.return_value = {
'ENV2': 'meh',
'ENV3': 'qux',
}
# Automatically trigger CTRL-C a short while from now.
event_loop.call_later(1.0, os.kill, os.getpid(), signal.SIGINT)
# Run the main function!
main([])
stdout, stderr = capfd.readouterr()
# Error log should be empty.
assert stderr.strip() == ''
# Output log should contain start & stop info for each subprocess.
#
# NOTE: we strip timestamps and PIDs from the logs since they don't matter
# here and it makes comparisons simpler.
lines = [line.split(' ', 1)[1] for line in stdout.strip().split('\n')]
expected_lines = []
for p in subprocess_factory.instances:
env = {k: p.env[k] for k in ('ENV1', 'ENV2', 'ENV3')}
assert env == {
'ENV1': 'bar',
'ENV2': 'meh',
'ENV3': 'qux',
}
expected_lines.extend([
'[strawboss] foo.0(%d) spawned.' % p.pid,
'[strawboss] foo.0(%d) killed.' % p.pid,
'[strawboss] foo.0(%d) completed with exit status -9.' % p.pid,
])
print(lines)
assert len(subprocess_factory.instances) > 0
assert set(lines) == set(expected_lines)
@mock.patch('dotenvfile.loadfile')
@mock.patch('procfile.loadfile')
def test_main_envfile_missing(load_procfile, load_dotenvfile,
subprocess_factory, capfd, event_loop):
load_procfile.return_value = {
'foo': {
'cmd': 'false',
'env': {
'ENV1': 'bar',
},
},
}
load_dotenvfile.side_effect = FileNotFoundError
# Automatically trigger CTRL-C a short while from now.
event_loop.call_later(1.0, os.kill, os.getpid(), signal.SIGINT)
# Run the main function!
main([])
stdout, stderr = capfd.readouterr()
    # Error log should contain the missing-envfile warning.
assert stderr.strip() == 'Warning: environment file ".env" not found.'
# Output log should contain start & stop info for each subprocess.
#
# NOTE: we strip timestamps and PIDs from the logs since they don't matter
# here and it makes comparisons simpler.
lines = [line.split(' ', 1)[1] for line in stdout.strip().split('\n')]
expected_lines = []
for p in subprocess_factory.instances:
env = {k: p.env[k] for k in ('ENV1',)}
assert env == {
'ENV1': 'bar',
}
expected_lines.extend([
'[strawboss] foo.0(%d) spawned.' % p.pid,
'[strawboss] foo.0(%d) killed.' % p.pid,
'[strawboss] foo.0(%d) completed with exit status -9.' % p.pid,
])
print(lines)
assert len(subprocess_factory.instances) > 0
assert set(lines) == set(expected_lines)
@mock.patch('dotenvfile.loadfile')
@mock.patch('procfile.loadfile')
def test_main_envfile_missing_partial(load_procfile, load_dotenvfile,
subprocess_factory, capfd, event_loop):
load_procfile.return_value = {
'foo': {
'cmd': 'false',
'env': {
'ENV1': 'bar',
},
},
}
load_dotenvfile.side_effect = [
{'ENV2': 'meh'},
FileNotFoundError,
{'ENV3': 'qux'},
]
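    # Each dotenvfile.loadfile call consumes the next side effect in order:
    # .env.common loads, .env.prod raises FileNotFoundError, .env.local loads.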
# Automatically trigger CTRL-C a short while from now.
event_loop.call_later(1.0, os.kill, os.getpid(), signal.SIGINT)
# Run the main function!
main([
'--envfile', '.env.common',
'--envfile', '.env.prod',
'--envfile', '.env.local',
])
stdout, stderr = capfd.readouterr()
    # Error log should contain the warning for the missing ".env.prod" file.
assert stderr.strip() == 'Warning: environment file ".env.prod" not found.'
# Output log should contain start & stop info for each subprocess.
#
# NOTE: we strip timestamps and PIDs from the logs since they don't matter
# here and it makes comparisons simpler.
lines = [line.split(' ', 1)[1] for line in stdout.strip().split('\n')]
expected_lines = []
for p in subprocess_factory.instances:
env = {k: p.env[k] for k in ('ENV1',)}
assert env == {
'ENV1': 'bar',
}
expected_lines.extend([
'[strawboss] foo.0(%d) spawned.' % p.pid,
'[strawboss] foo.0(%d) killed.' % p.pid,
'[strawboss] foo.0(%d) completed with exit status -9.' % p.pid,
])
print(lines)
assert len(subprocess_factory.instances) > 0
assert set(lines) == set(expected_lines)
|
|
import logging
from django.db import models
from django.contrib.auth.models import User
from django.db import IntegrityError
from apps.fbschema.struct_models import *
from apps.fbschema.utils import get_fql_from_model
from apps.fbschema.utils import compare_keys_with_fields
logger = logging.getLogger(__name__)
class BaseFbModel(models.Model):
'''
    Abstract class defining some common methods that help us generate queries
'''
ignore_fields=['id', 'user']
@classmethod
def fql_query(self, clause):
'''
        Returns the fql query built from the model's fields. It operates on the class itself
        rather than an instance, hence @classmethod
'''
return get_fql_from_model(self, clause)
@classmethod
def fql_query_me(self):
'''
        Returns the fql query using the fql_query method. The clause restricts results to data tuples
        associated directly with the user itself, i.e. owner = me() in fql
'''
return self.fql_query(self.me_clause)
@classmethod
def fql_query_my_friends(self, friend_uid):
'''
        Returns the fql query using the fql_query method. The clause restricts results to data tuples
        associated directly with the user's friends, i.e. owner = %d in fql
'''
return self.fql_query(self.my_friend_clause % friend_uid)
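    # e.g. for FacebookAlbum, fql_query_my_friends(12345) would produce
    # something like "SELECT aid, ..., owner FROM album WHERE owner=12345"
    # (assuming get_fql_from_model builds a SELECT over the model's
    # non-ignored fields using the class's fqlname).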
@classmethod
def local_fql_query(self, request, context_uid):
'''
        Returns the user-related tuples from the database model if no friend_uid is given,
        or the friend-related tuples if a friend_uid is given.
        The returned tuples are used to build model sets that we can compare with the result sets
'''
if not context_uid:
logger.info("context_uid is None due to owner_identifier=ONLY_SESSION_USER, fetching last `facebook_row_limit` tuples to compare with result data set")
print("context_uid is None due to owner_identifier=ONLY_SESSION_USER, fetching last `facebook_row_limit` tuples to compare with result data set")
return self.objects.all()[:self.facebook_row_limit]
facebookuser = FacebookUser.objects.get(user=request.user, uid=context_uid)
kwargs = { self.owner_identifier : facebookuser }
return self.objects.filter(user=request.user, **kwargs)
@classmethod
def primary_identifier_class(self):
'''
        Returns the model field of the primary identifier for a particular model, useful when we want to perform cleaning operations
'''
return [ field for field in self._meta.fields if getattr(field, 'name') == self.primary_identifier ][0]
@classmethod
def prepare_dict(self, response_data_dict, request=None):
from apps.fbschema.parse_utils import parse_clean_field_value
'''
        Receives a response row from the fql graph query. Cleans/parses the data and returns a valid
        dict that can be used to create and save a tuple/data-record.
        The operations applied to the data here are specific to the model in question, so this
        must be called on the proper database model, otherwise the key comparison will fail
'''
if not compare_keys_with_fields(self, response_data_dict.keys()): #TODO: This compare test is happening for each tuple, can we do better ?
raise ValueError #TODO: Use proper exception
model = self # Just to make code more readable
data_dict = {}
fields = [field for field in model._meta.fields if getattr(field, 'name') not in model.ignore_fields]
for field in fields:
field_name = getattr(field, 'name')
key = field_name
value = parse_clean_field_value(field, response_data_dict[key], request)
data_dict[key] = value
return data_dict
@classmethod
def save_update_delete(self, request, response_data, stream_nature=False):
from apps.fbschema.parse_utils import parse_clean_field_value
'''
Main function which saves, updates and deletes on updated result sets
This function is always called with subclasses
        @stream_nature = For example the facebook stream is a stream of incoming items. If we compare the last 'n' database items with incoming
                         response items and stream_nature is not set, the existing code would delete those old database items assuming
                         they no longer exist; a stream is one in which we always save and update but never delete
        @ONLY_SESSION_USER = local_fql_query uses 'owner_identifier' to bring database items into context for comparison with response items.
                         context_uid is usually me() or a friend's uid, but some tables only allow the session user, so no
                         context_uid is needed and we return the last 'n' records OR all records
'''
if self.owner_identifier == "ONLY_SESSION_USER":
context_uid = None
else:
context_uid = response_data[0][self.owner_identifier]
        #Assumption: response_data (list) has some values (dicts)
model_data_list = self.local_fql_query(request, context_uid)
primary_identifier = self.primary_identifier
        #Response data set of 'primary key'; we also need to make sure the data we compare is 'cleaned'
response_data_set = set([parse_clean_field_value( self.primary_identifier_class(), response_data_dict[primary_identifier] ) \
for response_data_dict in response_data])
#Model data set of 'primary key'
model_data_set = set([getattr(model_data, primary_identifier) for model_data in model_data_list])
#Delete list
if stream_nature:
delete_list = []
else:
delete_list = list(model_data_set.difference(response_data_set))
#Add List
add_list = list(response_data_set.difference(model_data_set))
#Update List
update_list = list(response_data_set.intersection(model_data_set))
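        # e.g. response ids {1, 2, 3} vs stored ids {2, 3, 4} give
        # delete_list=[4], add_list=[1], update_list=[2, 3]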
print response_data_set
print model_data_set
print add_list
print update_list
for response_data_dict in response_data:
data_dict = self.prepare_dict(response_data_dict, request)
if data_dict[primary_identifier] in add_list:
#Add here
try:
data_tuple = self(user=request.user, **data_dict)
data_tuple.save()
except IntegrityError:
'''
There are some cases where due to facebook data nature we expect integrity error within ids being saved.
This exception should be handled by that particular model which is expecting this case.
A use case : FacebookStream
with some post_id : 'x' changed his profile picture
with same post_id : 'x' and seven others changed profile pictures
'''
logger.info("Integrity Exception handled, Model = %s" % self.__class__)
print("Integrity Exception handled, Model = %s" % self.__class__)
data_tuple = self.handle_integrity_exception(request, data_dict)
data_tuple.save()
elif data_dict[primary_identifier] in update_list:
#Update here
kwargs = { primary_identifier : data_dict[primary_identifier] }
data_tuple = self.objects.get(user=request.user, **kwargs)
data_tuple = self(user=request.user, id=data_tuple.id, **data_dict)
data_tuple.save()
class Meta:
abstract = True
'''
Unless the 'Facebook' prefix is specified, the model names and field names match the facebook schemas exactly. The target is to avoid any confusion; we are just
mirroring the data
'''
class FacebookAlbum(BaseFbModel):
'''
Album Table
'''
'''
Interesting:
    1) uid can be me or my friends, which means this model is going to store all albums in my social graph ( or more than that : out of scope right now )
2) #Me and #My Friends = Two UID clusters
'''
fqlname = "album"
primary_identifier = "aid"
owner_identifier = "owner"
me_clause = "WHERE owner=me()"
my_friend_clause = "WHERE owner=%d"
## Django Application specific fields
user = models.ForeignKey(User, help_text="Data belongs with this system user: Viewer")
## Fb Schema specific fields
aid = models.CharField( db_index=True, max_length=100, help_text="The album ID" )
#TODO: aid is not biginteger ? invalid literal for int() with base 10: '100000001295087_31004'
backdated_time = models.DateTimeField( blank=True, null=True, help_text="Time that the album is backdated to" )
can_backdate = models.NullBooleanField( blank=True, null=True, help_text="Can the album be backdated on Timeline" )
can_upload = models.NullBooleanField( blank=True, null=True, help_text="Determines whether a given UID can upload to the album. It is true\
if the following conditions are met: The user owns the album, the album is not a special album like the profile pic\
album, the album is not full.")
comment_info = models.ForeignKey( StructCommentInfo, blank=True, null=True)
cover_object_id = models.BigIntegerField( help_text="The album cover photo object_id" )
cover_pid = models.BigIntegerField( help_text="The album cover photo ID string" )
created = models.DateTimeField( blank=True, null=True, help_text="The time the photo album was initially created expressed as UNIX time." )
description = models.TextField( blank=True, null=True, help_text="The description of the album")
edit_link = models.CharField( max_length=500, blank=True, null=True, help_text="The URL for editing the album")
is_user_facing = models.NullBooleanField( blank=True, null=True, help_text="Determines whether or not the album should be shown to users." )
like_info = models.ForeignKey( StructLikeInfo, blank=True, null=True )
link = models.CharField( max_length=500, blank=True, null=True, help_text="A link to this album on Facebook")
location = models.CharField( max_length=100, blank=True, null=True, help_text="The location of the album")
modified = models.DateTimeField( blank=True, null=True, help_text="The last time the photo album was updated expressed as UNIX time." )
modified_major = models.DateTimeField( blank=True, null=True, help_text="Indicates the time a major update (like addition of photos)\
was last made to the album expressed as UNIX time." )
name = models.TextField( blank=True, null=True, help_text="The title of the album")
object_id = models.BigIntegerField( db_index=True, help_text="The object_id of the album on Facebook.")
owner = models.ForeignKey('FacebookUser', related_name="albums", help_text="The user ID of the owner of the album")
owner_cursor = models.CharField( max_length=100, blank=True, null=True, help_text="Cursor for the owner field")
photo_count = models.IntegerField( blank=True, null=True, help_text="The number of photos in the album" )
place_id = models.BigIntegerField( blank=True, null=True, help_text="Facebook ID of the place associated with the album, if any.")
type = models.CharField( max_length=50, blank=True, null=True, help_text="The type of photo album. Can be one of profile:\
The album containing profile pictures, mobile: The album containing mobile uploaded photos, wall: The album\
containing photos posted to a user's Wall, normal: For all other albums.")
video_count = models.IntegerField( blank=True, null=True, help_text="The number of videos in the album" )
visible = models.CharField( max_length=50, blank=True, null=True, help_text="Visible only to the album owner. \
Indicates who can see the album. The value can be one of friends, friends-of-friends, networks, everyone,\
                        custom (if the visibility doesn't match any of the other values)" )
def __unicode__(self):
return self.name
class Meta:
unique_together = (("user", "object_id", "owner"),)
class FacebookPhoto(BaseFbModel):
'''
Photo Table
'''
fqlname = "photo"
primary_identifier = "pid"
owner_identifier = "owner" # for some models object_id is going to behave like owner
me_clause = "WHERE owner=me() limit 5000"
my_friend_clause = "WHERE owner=%d limit 5000"
## Django Application specific fields
user = models.ForeignKey(User, help_text="Data belongs with this system user: Viewer")
## Fb Schema specific fields
aid = models.ForeignKey( FacebookAlbum, blank=True, null=True, related_name="photos", help_text="The ID of the album containing the \
photo being queried. The aid cannot be longer than 50 characters.")
aid_cursor = models.CharField( max_length=100, blank=True, null=True, help_text="A cursor used to paginated through \
a query that is indexed on the aid")
album_object_id = models.BigIntegerField( db_index=True, help_text="The object_id of the album the photo belongs to")
album_object_id_cursor = models.CharField( max_length=100, blank=True, null=True, help_text="A cursor used to paginate through\
a query that is indexed on the album_object_id")
backdated_time = models.DateTimeField( blank=True, null=True, help_text="The time the photo was backdated to in Timeline" )
backdated_time_granularity = models.CharField( max_length=100, blank=True, null=True, help_text="A string representing the backdated \
granularity. Valid values are year, month, day, hour, or minute" )
can_backdate = models.NullBooleanField( blank=True, null=True, help_text="true if the viewer is able to backdate the photo" )
can_delete = models.NullBooleanField( blank=True, null=True, help_text="true if the viewer is able to delete the photo" )
can_tag = models.NullBooleanField( blank=True, null=True, help_text="true if the viewer is able to tag the photo" )
caption = models.TextField( blank=True, null=True, help_text="The caption for the photo being queried")
caption_tags = models.TextField( blank=True, null=True, help_text="An array indexed by offset of arrays of the tags in the \
caption of the photo, containing the id of the tagged object, the name of the tag, the offset of where the \
tag occurs in the message and the length of the tag.")
comment_info = models.ForeignKey( StructCommentInfo, blank=True, null=True, help_text="The comment information of the photo \
being queried. This is an object containing can_comment and comment_count")
created = models.DateTimeField( blank=True, null=True, help_text="The date when the photo being queried was added." )
images = models.TextField( blank=True, null=True, help_text="An array of objects containing width, height, source each \
representing the various photo sizes.")
like_info = models.ForeignKey( StructLikeInfo, blank=True, null=True )
link = models.CharField( max_length=500, blank=True, null=True, help_text="The URL to the page containing the photo being queried.")
modified = models.DateTimeField( blank=True, null=True, help_text="The date when the photo being queried was last modified." )
object_id = models.BigIntegerField( db_index=True, help_text="The object_id of the photo")
offline_id = models.BigIntegerField( blank=True, null=True, help_text="The object_id of the photo")
owner = models.ForeignKey('FacebookUser', related_name="myphotos", help_text="The user ID of the photo being queried")
owner_cursor = models.CharField( max_length=100, blank=True, null=True, help_text="A cursor used to paginate through\
a query that is indexed on the owner" )
    page_story_id = models.BigIntegerField( blank=True, null=True, help_text="The ID of the feed story about this photo if it belongs to a page")
pid = models.BigIntegerField( db_index=True, blank=True, null=True, help_text="The ID of the photo being queried. \
The pid cannot be longer than 50 characters." )
place_id = models.BigIntegerField( blank=True, null=True, help_text="Facebook ID of the place associated with the photo, if any.")
position = models.IntegerField( blank=True, null=True, help_text="The position of the photo in the album." )
src = models.CharField( max_length=500, blank=True, null=True, help_text="The URL to the album view version of the photo \
being queried. The image can have a maximum width or height of 130px" )
src_big = models.CharField( max_length=500, blank=True, null=True, help_text="The URL to the full-sized version of the photo \
being queried. The image can have a maximum width or height of 720px, increasing to 960px on 1st March 2012" )
src_big_height = models.IntegerField( blank=True, null=True, help_text="Height of the full-sized version, in px. This field may be blank." )
src_big_width = models.IntegerField( blank=True, null=True, help_text="Width of the full-sized version, in px" )
src_height = models.IntegerField( blank=True, null=True, help_text="Height of the album view version, in px" )
src_small = models.CharField( max_length=500, blank=True, null=True, help_text="The URL to the thumbnail version of the photo \
being queried. The image can have a maximum width of 75px and a maximum height of 225px." )
src_small_height = models.IntegerField( blank=True, null=True, help_text="Height of the thumbnail version, in px. This field may be blank." )
src_small_width = models.IntegerField( blank=True, null=True, help_text="Width of the thumbnail version, in px" )
src_width = models.IntegerField( blank=True, null=True, help_text="Width of the album view version, in px" )
target_id = models.BigIntegerField( blank=True, null=True, help_text="The ID of the target the photo is posted to" )
target_type = models.CharField( max_length=100, blank=True, null=True, help_text="The type of target the photo is posted to" )
class Meta:
unique_together = (("user", "pid", "owner"),)
class FacebookLink(BaseFbModel):
'''
Link Table
'''
fqlname = "link"
primary_identifier = "link_id"
owner_identifier = "owner" # for some models object_id is going to behave like owner
me_clause = "WHERE owner=me() limit 5000"
my_friend_clause = "WHERE owner=%d limit 5000"
## Django Application specific fields
user = models.ForeignKey(User, help_text="Data belongs with this system user: Viewer")
## Fb Schema specific fields
backdated_time = models.DateTimeField( blank=True, null=True, help_text="Time that the link is backdated to." )
can_backdate = models.NullBooleanField( blank=True, null=True, help_text="Can the link be backdated on Timeline" )
caption = models.TextField( blank=True, null=True, help_text="The caption of the link")
comment_info = models.ForeignKey( StructCommentInfo, blank=True, null=True, help_text="The comment information of the link being queried." )
created_time = models.DateTimeField( blank=True, null=True, help_text="The time the user posted the link." )
image_urls = models.TextField( blank=True, null=True, help_text="The URLs to the images associated with the link, \
as taken from the site's link tag." )
like_info = models.ForeignKey( StructLikeInfo, blank=True, null=True ) #Skipping privacy struct as of now
link_id = models.BigIntegerField( db_index=True, help_text="The unique identifier for the link." )
owner = models.ForeignKey('FacebookUser', related_name="links", help_text="The user ID for the user who posted the link.")
owner_comment = models.TextField( blank=True, null=True, help_text="The comment the owner made about the link." )
owner_cursor = models.CharField( max_length=100, blank=True, null=True, help_text="Cursor for the owner field" )
picture = models.CharField( max_length=500, blank=True, null=True, help_text="The URL to the thumbnail image that is displayed by default" )
summary = models.TextField( blank=True, null=True, help_text="A summary of the link, as taken from the site's description meta tag." )
title = models.TextField( blank=True, null=True, help_text="The title of the link, as taken from the site's title meta tag." )
url = models.CharField( max_length=500, blank=True, null=True, help_text="The actual URL for the link." )
via_id = models.BigIntegerField( blank=True, null=True, help_text="The unique identifier of the original link poster." )
class Meta:
unique_together = (("user", "link_id"),)
class FacebookNotification(BaseFbModel):
'''
Notification Table
'''
fqlname = "notification"
primary_identifier = "notification_id"
owner_identifier = "ONLY_SESSION_USER"
    #owner_identifier=recipient_id is possible here, but a notification's nature is more like a stream,
    #so for a long notification table it wouldn't make sense to compare with all data sets again and again
    #as we do with photo or album objects
facebook_row_limit = 500 #TODO : enough for now
me_clause = "WHERE recipient_id=me()"
## Django Application specific fields
user = models.ForeignKey(User, help_text="Data belongs with this system user: Viewer")
## Fb Schema specific fields
app_id = models.BigIntegerField( blank=True, null=True, help_text="The ID of the application associated with the \
notification. This may be a third-party application or a Facebook application (for example, Wall)." )
body_html = models.TextField( blank=True, null=True, help_text="Any additional content the notification includes, in HTML." )
body_text = models.TextField( blank=True, null=True, help_text="The plaintext version of body_html, with all HTML tags stripped out." )
created_time = models.DateTimeField( blank=True, null=True, help_text="The time the notification was originally sent. Notifications\
older than 7 days are deleted and will not be returned via this table." )
href = models.CharField( max_length=500, blank=True, null=True, help_text="The URL associated with the notification. \
This is usually a location where the user can interact with the subject of the notification." )
icon_url = models.CharField( max_length=500, blank=True, null=True, help_text="The URL associated with the notification's icon." )
is_hidden = models.IntegerField( blank=True, null=True, help_text="Indicates whether the user hid the associated application's notifications." )
is_unread = models.IntegerField( blank=True, null=True, help_text="Indicates whether the notification has been marked as read.\
Use notifications.markRead to mark a notification as read." )
notification_id = models.BigIntegerField( unique=True, blank=True, null=True, help_text="The ID of the notification. This ID is not globally unique, \
so the recipient_id must be specified in addition to it." )
object_id = models.CharField( max_length=500, blank=True, null=True, help_text="The object id of the notification." )
object_type = models.CharField( max_length=50, blank=True, null=True, help_text="The object type (e.g. stream, photo, event etc.) \
of the notification." )
recipient_id = models.ForeignKey( 'FacebookUser', related_name="notifications", help_text="The user ID of the recipient of the\
notification. It is always the current session user." )
sender_id = models.BigIntegerField( blank=True, null=True, help_text="The user ID of the sender of the notification." )
    #^Not a ForeignKey: some users might be outside our social graph, and for performance reasons, because
    # even if we wanted to store their user tuple in the FacebookUser table we would have to afford an http query cycle,
    # and that can be done later; in any case we have the 'id' if we want to do something with this
title_html = models.TextField( blank=True, null=True, help_text="The main body of the notification in HTML." )
title_text = models.TextField( blank=True, null=True, help_text="The plaintext version of title_html, with all HTML tags stripped out." )
updated_time = models.DateTimeField( blank=True, null=True, help_text="The time the notification was originally sent, or the time the \
notification was updated, whichever is later." )
class FacebookStream(BaseFbModel):
'''
Stream Table
'''
fqlname = "stream"
primary_identifier = "post_id"
owner_identifier = "ONLY_SESSION_USER" # for some models object_id is going to behave like owner
facebook_row_limit = 50
me_clause = "WHERE filter_key in (SELECT filter_key FROM stream_filter WHERE uid=me())"
'''
On facebookstream table there is no owner_identifier that means -
There is no 'context_user' filter
'''
## Django Application specific fields
user = models.ForeignKey(User, help_text="Data belongs with this system user: Viewer")
## Fb Schema specific fields
action_links = models.TextField( blank=True, null=True, help_text="An array containing the text and URL for each action link" )
actor_id = models.BigIntegerField( blank=True, null=True, help_text="The ID of the user, page, group, or event that published the post" )
attribution = models.CharField( max_length=500, blank=True, null=True, help_text="For posts published by apps, the full name of that app" )
created_time = models.DateTimeField( blank=True, null=True, help_text="The time the post was published" )
description = models.TextField( blank=True, null=True, help_text="Text of stories not intentionally generated by users, \
such as those generated when two users become friends. You must have the 'Include recent activity stories'\
migration enabled in your app to retrieve this field" )
description_tags = models.TextField( blank=True, null=True, help_text="The list of tags in the post description" )
expiration_timestamp= models.DateTimeField( blank=True, null=True, help_text="UNIX timestamp of when the offer expires" )
filter_key = models.CharField( max_length=500, blank=True, null=True, help_text="The filter key to fetch data with" )
impressions = models.IntegerField( blank=True, null=True, help_text="Number of impressions of this post." )
is_hidden = models.NullBooleanField( blank=True, null=True, help_text="Whether a post has been set to hidden" )
is_published = models.NullBooleanField( blank=True, null=True, help_text="Whether the post is published" )
message = models.TextField( blank=True, null=True, help_text="The message written in the post" )
message_tags = models.TextField( blank=True, null=True, help_text="The list of tags in the post mssage" )
parent_post_id = models.CharField( max_length=500, blank=True, null=True, help_text="ID of the parent post" )
permalink = models.CharField( max_length=500, blank=True, null=True, help_text="The URL of the post" )
place = models.BigIntegerField( blank=True, null=True, help_text="ID of the place associated with the post" )
post_id = models.CharField( max_length=255, blank=True, null=True, help_text="The ID of the post" )
share_count = models.IntegerField( blank=True, null=True, help_text="Number of times the post has been shared" )
source_id = models.BigIntegerField( db_index=True, blank=True, null=True, help_text="The ID of the user, page, group, \
or event whose wall the post is on" )
subscribed = models.NullBooleanField( blank=True, null=True, help_text="Whether user is subscribed to the post" )
tagged_ids = models.TextField( blank=True, null=True, help_text="An array of IDs tagged in the message of the post." )
target_id = models.BigIntegerField( db_index=True, blank=True, null=True, help_text="The user, page, group, or event to whom the post was directed" )
timeline_visibility = models.CharField( max_length=500, blank=True, null=True, help_text="Timeline visibility information of the post" )
type = models.IntegerField( blank=True, null=True, help_text="The type of this story" )
updated_time = models.DateTimeField( blank=True, null=True, help_text="The time the post was last updated, which occurs when a user \
comments on the post, expressed as a UNIX timestamp" )
via_id = models.BigIntegerField( blank=True, null=True, help_text="ID of the user or Page the post was shared from" )
viewer_id = models.BigIntegerField( blank=True, null=True, help_text="The ID of the current session user" )
with_location = models.NullBooleanField( blank=True, null=True, help_text="ID of the location associated with the post" )
with_tags = models.TextField( blank=True, null=True, help_text="An array of IDs of entities (e.g. users) tagged in this post" )
xid = models.BigIntegerField( blank=True, null=True, help_text="When querying for the feed of a live stream box, \
this is the xid associated with the Live Stream box (you can provide 'default' if one is not available)" )
@classmethod
def handle_integrity_exception(self, request, data_dict):
'''
Handles some rare cases of integrity exceptions. In case we are expecting this error we define this method in model class
'''
kwargs = { self.primary_identifier : data_dict[self.primary_identifier] }
data_tuple = self.objects.get(user=request.user, **kwargs)
data_tuple = self(user=request.user, id=data_tuple.id, **data_dict)
return data_tuple
class Meta:
unique_together = (("user", "post_id"),)
class FacebookUser(BaseFbModel):
'''
User Table
'''
fqlname = "user"
## Django Application specific fields
user = models.ForeignKey(User, help_text="Data belongs with this system user")
#TODO: Move this foreignkey to FacebookFriends model along with 'friend relation' information
#Moral of the story is : if in any fql schema if there is any 'viewer related' field then we will need this foreignkey 'user'
## Fb Schema specific fields
about_me = models.TextField( blank=True, null=True, help_text="More information about the user being queried")
activities = models.TextField( blank=True, null=True, help_text="The user's activities")
affiliations = models.TextField( blank=True, null=True, help_text="The networks to which the user being queried \
belongs. The status field within this field will only return results in English")
    # max_length = 3000 represents an array ( arrays don't have fixed fields, so for now CharField is enough )
age_range = models.ForeignKey(StructAgeRange, blank=True, null=True)
allowed_restrictions= models.CharField( max_length=3000, blank=True, null=True, help_text="A comma-delimited list of demographic \
restriction types a user is allowed to access. Currently, alcohol is the only type that can get returned")
birthday = models.DateTimeField( blank=True, null=True, help_text="The user's birthday. The format of this date varies based\
on the user's locale")
books = models.TextField( blank=True, null=True, help_text="The user's favorite books")
can_message = models.NullBooleanField( blank=True, null=True, help_text="Whether the user can send a message to another user" )
    ## <Lean:Stopage> I am currently picking up only the fields I am interested in, and will try a basic import process which can update things
devices = models.CharField( max_length=1000, blank=True, null=True, help_text="An array of objects containing fields os")
education = models.TextField( blank=True, null=True, help_text="A list of the user's education history. Contains\
year and type fields, and school object (name, id, type, and optional year, degree, concentration array, classes array,\
and with array )")
email = models.EmailField( blank=True, null=True, help_text="A string containing the user's primary Facebook email address \
or the user's proxied email address, whichever address the user granted your application. Facebook recommends you query\
this field to get the email address shared with your application")
first_name = models.CharField( max_length=200, blank=True, null=True, help_text="The user's first name" )
friend_count = models.IntegerField( blank=True, null=True, help_text="Count of all the user's friends" )
interests = models.TextField( blank=True, null=True, help_text="The user's interests" )
likes_count = models.IntegerField( blank=True, null=True, help_text="Count of all the pages this user has liked" )
movies = models.TextField( blank=True, null=True, help_text="The user's favorite movies")
mutual_friend_count = models.IntegerField( blank=True, null=True, help_text="The number of mutual friends shared by the \
user being queried and the session user")
quotes = models.TextField( blank=True, null=True, help_text="The user's favorite quotes")
relationship_status = models.CharField( max_length=50, blank=True, null=True, help_text="The type of relationship for the user being queried")
religion = models.CharField( max_length=50, blank=True, null=True, help_text="The user's religion")
sex = models.CharField( max_length=50, blank=True, null=True, help_text="The user's gender")
subscriber_count = models.IntegerField( blank=True, null=True, help_text="The user's total number of subscribers")
    tv = models.TextField( blank=True, null=True, help_text="The user's favorite television shows")
uid = models.BigIntegerField( db_index=True, help_text="The user ID") # primary_key=True ? - No if you want to make it multi-user oriented
username = models.CharField( max_length=500, blank=True, null=True, help_text="The user's username")
    wall_count = models.IntegerField( blank=True, null=True, help_text="The number of posts on the user's wall")
    website = models.CharField( max_length=1000, blank=True, null=True, help_text="The user's website")
work = models.TextField( max_length=3000, blank=True, null=True, help_text="A list of the user's work history.\
Contains employer, location, position, start_date and end_date fields")
def __unicode__(self):
return self.username
class Meta:
unique_together = (("user", "uid"),)
class FacebookFriend(BaseFbModel):
'''
Friend Table
'''
fqlname = "friend"
## Django Application specific fields
user = models.ForeignKey(User, help_text="Data belongs with this system user")
## Fb Schema specific fields
uid1 = models.BigIntegerField( db_index=True, help_text="The user ID of the first user in a particular friendship link.")
uid2 = models.BigIntegerField( db_index=True, help_text="The user ID of the second user in a particular friendship link.")
class Meta:
unique_together = (("user", "uid1", "uid2"),)
class FacebookLike(BaseFbModel):
'''
Like Table
'''
fqlname = "like"
## Fb Schema specific fields
object_id = models.BigIntegerField( db_index=True, help_text="The object_id of a video, note, link, photo, or album. \
Note: For photos and albums, the object_id is a different field from the photo table pid field and the \
album table aid field, use the specified object_id from those tables instead.")
object_type = models.CharField( max_length=50, blank=True, null=True, help_text="The type of the liked object. One of: photo, album,\
event, group, note, link, video, application, status, check-in, review, comment, post")
#However facebook is showing only 'profile' to me
post_id = models.BigIntegerField( db_index=True, blank=True, null=True, help_text="The id of a post on Facebook. This can be a stream post\
containing a status, video, note, link, photo, or photo album")
user_id = models.ForeignKey('FacebookUser', related_name="likes", help_text="The user who likes this object.")
    #TODO: This relation is defined by request.user+user_id, which means each system user has its own set of users in 'facebookusers',
    #and that is not the expected behaviour. For now I am not separating them, as doing so requires altering the fql schema
    #so that 'friend relation' specific information can be written to the 'friends' table rather than the 'facebookuser' table
class Meta:
unique_together = (("object_id", "user_id"),) #Not having 'user' tells that it's a viewer free model
|
|
# Copyright 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo.serialization import jsonutils
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
import nova.context
from nova import db
from nova import exception
from nova.network import api as network_api
from nova.network import manager as network_manager
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.objects import virtual_interface as vif_obj
from nova.pci import device as pci_device
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_instance_info_cache
from nova.tests.objects import test_pci_device
HOST = "testhost"
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
class FakeModel(dict):
"""Represent a model from the db."""
def __init__(self, *args, **kwargs):
self.update(kwargs)
class FakeNetworkManager(network_manager.NetworkManager):
"""This NetworkManager doesn't call the base class so we can bypass all
inherited service cruft and just perform unit tests.
"""
class FakeDB:
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'network_id': 1,
'uuid': 'fake-uuid',
'address': 'DC:AD:BE:FF:EF:01'},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000020',
'network_id': 21,
'uuid': 'fake-uuid2',
'address': 'DC:AD:BE:FF:EF:02'},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000030',
'network_id': 31,
'uuid': 'fake-uuid3',
'address': 'DC:AD:BE:FF:EF:03'}]
floating_ips = [dict(address='172.16.1.1',
fixed_ip_id=100),
dict(address='172.16.1.2',
fixed_ip_id=200),
dict(address='173.16.1.2',
fixed_ip_id=210)]
fixed_ips = [dict(test_fixed_ip.fake_fixed_ip,
id=100,
address='172.16.0.1',
virtual_interface_id=0),
dict(test_fixed_ip.fake_fixed_ip,
id=200,
address='172.16.0.2',
virtual_interface_id=1),
dict(test_fixed_ip.fake_fixed_ip,
id=210,
address='173.16.0.2',
virtual_interface_id=2)]
def fixed_ip_get_by_instance(self, context, instance_uuid):
return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
dict(address='10.0.0.2')]
def network_get_by_cidr(self, context, cidr):
raise exception.NetworkNotFoundForCidr(cidr=cidr)
def network_create_safe(self, context, net):
fakenet = dict(net)
fakenet['id'] = 999
return fakenet
def network_get(self, context, network_id, project_only="allow_none"):
return {'cidr_v6': '2001:db8:69:%x::/64' % network_id}
def network_get_by_uuid(self, context, network_uuid):
raise exception.NetworkNotFoundForUUID(uuid=network_uuid)
def network_get_all(self, context):
raise exception.NoNetworksFound()
def network_get_all_by_uuids(self, context, project_only="allow_none"):
raise exception.NoNetworksFound()
def network_disassociate(self, context, network_id):
return True
def virtual_interface_get_all(self, context):
return self.vifs
def fixed_ips_by_virtual_interface(self, context, vif_id):
return [ip for ip in self.fixed_ips
if ip['virtual_interface_id'] == vif_id]
def fixed_ip_disassociate(self, context, address):
return True
def __init__(self, stubs=None):
self.db = self.FakeDB()
if stubs:
stubs.Set(vif_obj, 'db', self.db)
self.deallocate_called = None
self.deallocate_fixed_ip_calls = []
self.network_rpcapi = network_rpcapi.NetworkAPI()
# TODO(matelakat) method signature should align with the faked one's
def deallocate_fixed_ip(self, context, address=None, host=None,
instance=None):
self.deallocate_fixed_ip_calls.append((context, address, host))
# TODO(matelakat) use the deallocate_fixed_ip_calls instead
self.deallocate_called = address
def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
pass
def get_instance_nw_info(context, instance_id, rxtx_factor,
host, instance_uuid=None, **kwargs):
pass
def fake_network(network_id, ipv6=None):
if ipv6 is None:
ipv6 = CONF.use_ipv6
fake_network = {'id': network_id,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
'label': 'test%d' % network_id,
'injected': False,
'multi_host': False,
'cidr': '192.168.%d.0/24' % network_id,
'cidr_v6': None,
'netmask': '255.255.255.0',
'netmask_v6': None,
'bridge': 'fake_br%d' % network_id,
'bridge_interface': 'fake_eth%d' % network_id,
'gateway': '192.168.%d.1' % network_id,
'gateway_v6': None,
'broadcast': '192.168.%d.255' % network_id,
'dns1': '192.168.%d.3' % network_id,
'dns2': '192.168.%d.4' % network_id,
'dns3': '192.168.%d.3' % network_id,
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.%d.2' % network_id,
'vpn_public_port': None,
'vpn_private_address': None,
'dhcp_start': None,
'rxtx_base': network_id * 10,
'priority': None,
'deleted': False,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'mtu': None,
'dhcp_server': '192.168.%d.1' % network_id,
'enable_dhcp': True,
'share_address': False}
if ipv6:
fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
fake_network['netmask_v6'] = '64'
if CONF.flat_injected:
fake_network['injected'] = True
return fake_network
def fake_vif(x):
    return {'id': x,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:%02x' % x,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
'network_id': x,
'instance_uuid': 'fake-uuid'}
def floating_ip_ids():
for i in xrange(1, 100):
yield i
def fixed_ip_ids():
for i in xrange(1, 100):
yield i
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
def next_fixed_ip(network_id, num_floating_ips=0):
next_id = fixed_ip_id.next()
f_ips = [FakeModel(**next_floating_ip(next_id))
for i in xrange(num_floating_ips)]
return {'id': next_id,
'network_id': network_id,
'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
'instance_uuid': 1,
'allocated': False,
'reserved': False,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'leased': True,
'host': HOST,
'deleted': 0,
'network': fake_network(network_id),
'virtual_interface': fake_vif(network_id),
# and since network_id and vif_id happen to be equivalent
'virtual_interface_id': network_id,
'floating_ips': f_ips}
def next_floating_ip(fixed_ip_id):
next_id = floating_ip_id.next()
return {'id': next_id,
'address': '10.10.10.%03d' % (next_id + 99),
'fixed_ip_id': fixed_ip_id,
'project_id': None,
'auto_assigned': False}
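# A worked example of the id plumbing above (values are deterministic once the
# generators are reset): the first call to next_floating_ip(100) yields id 1
# and address '10.10.10.100', because addresses are offset by 99 from the
# generated id; likewise the first next_fixed_ip(1) starts at '192.168.1.100'.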
def ipv4_like(ip, match_string):
ip = ip.split('.')
match_octets = match_string.split('.')
for i, octet in enumerate(match_octets):
if octet == '*':
continue
if octet != ip[i]:
return False
return True
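# Illustrative checks for the wildcard matcher above (hypothetical addresses):
# a '*' octet matches anything, every other octet must match exactly.
#   ipv4_like('192.168.0.3', '192.168.*.*')  -> True
#   ipv4_like('192.168.0.3', '10.*.*.*')     -> False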
def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
floating_ips_per_fixed_ip=0):
# stubs is the self.stubs from the test
# ips_per_vif is the number of ips each vif will have
    # floating_ips_per_fixed_ip is the number of floating ips for each fixed ip
network = network_manager.FlatManager(host=HOST)
network.db = db
# reset the fixed and floating ip generators
global floating_ip_id, fixed_ip_id, fixed_ips
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
fixed_ips = []
def fixed_ips_fake(*args, **kwargs):
global fixed_ips
ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
for i in xrange(1, num_networks + 1)
for j in xrange(ips_per_vif)]
fixed_ips = ips
return ips
def update_cache_fake(*args, **kwargs):
pass
stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
class FakeContext(nova.context.RequestContext):
def is_admin(self):
return True
nw_model = network.get_instance_nw_info(
FakeContext('fakeuser', 'fake_project'),
0, 3, None)
return nw_model
def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
num_networks=1,
ips_per_vif=1,
floating_ips_per_fixed_ip=0):
def get_instance_nw_info(self, context, instance, conductor_api=None):
return fake_get_instance_nw_info(stubs, num_networks=num_networks,
ips_per_vif=ips_per_vif,
floating_ips_per_fixed_ip=floating_ips_per_fixed_ip)
if func is None:
func = get_instance_nw_info
stubs.Set(network_api.API, 'get_instance_nw_info', func)
def stub_out_network_cleanup(stubs):
stubs.Set(network_api.API, 'deallocate_for_instance',
lambda *args, **kwargs: None)
_real_functions = {}
def set_stub_network_methods(stubs):
global _real_functions
cm = compute_manager.ComputeManager
if not _real_functions:
_real_functions = {
'_get_instance_nw_info': cm._get_instance_nw_info,
'_allocate_network': cm._allocate_network,
'_deallocate_network': cm._deallocate_network}
def fake_networkinfo(*args, **kwargs):
return network_model.NetworkInfo()
def fake_async_networkinfo(*args, **kwargs):
return network_model.NetworkInfoAsyncWrapper(fake_networkinfo)
stubs.Set(cm, '_get_instance_nw_info', fake_networkinfo)
stubs.Set(cm, '_allocate_network', fake_async_networkinfo)
stubs.Set(cm, '_deallocate_network', lambda *args, **kwargs: None)
def unset_stub_network_methods(stubs):
global _real_functions
if _real_functions:
cm = compute_manager.ComputeManager
for name in _real_functions:
stubs.Set(cm, name, _real_functions[name])
def stub_compute_with_ips(stubs):
orig_get = compute_api.API.get
orig_get_all = compute_api.API.get_all
orig_create = compute_api.API.create
def fake_get(*args, **kwargs):
return _get_instances_with_cached_ips(orig_get, *args, **kwargs)
def fake_get_all(*args, **kwargs):
return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs)
def fake_create(*args, **kwargs):
return _create_instances_with_cached_ips(orig_create, *args, **kwargs)
def fake_pci_device_get_by_addr(context, node_id, dev_addr):
return test_pci_device.fake_db_dev
stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr)
stubs.Set(compute_api.API, 'get', fake_get)
stubs.Set(compute_api.API, 'get_all', fake_get_all)
stubs.Set(compute_api.API, 'create', fake_create)
def _get_fake_cache():
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3')]}]}}]
if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return jsonutils.dumps(info)
def _get_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
context = args[0]
fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a')
def _info_cache_for(instance):
info_cache = dict(test_instance_info_cache.fake_info_cache,
network_info=_get_fake_cache(),
instance_uuid=instance['uuid'])
if isinstance(instance, obj_base.NovaObject):
_info_cache = objects.InstanceInfoCache(context)
objects.InstanceInfoCache._from_db_object(context, _info_cache,
info_cache)
info_cache = _info_cache
instance['info_cache'] = info_cache
if isinstance(instances, (list, obj_base.ObjectListBase)):
for instance in instances:
_info_cache_for(instance)
pci_device.claim(fake_device, instance)
pci_device.allocate(fake_device, instance)
else:
_info_cache_for(instances)
pci_device.claim(fake_device, instances)
pci_device.allocate(fake_device, instances)
return instances
def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the above kludge so that the database doesn't get out
of sync with the actual instance.
"""
instances, reservation_id = orig_func(*args, **kwargs)
fake_cache = _get_fake_cache()
for instance in instances:
instance['info_cache']['network_info'] = fake_cache
db.instance_info_cache_update(args[1], instance['uuid'],
{'network_info': fake_cache})
return (instances, reservation_id)
|
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/linters/python_linter.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import multiprocessing
import os
from core.tests import test_utils
from . import pre_commit_linter
from . import python_linter
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_PY_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.py')
VALID_TEST_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid_test_file_test.py')
PYTHON_UTILS_FILEPATH = os.path.join(os.getcwd(), 'python_utils.py')
INVALID_IMPORT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_import_order.py')
INVALID_TEST_ONLY_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_test_only.py')
INVALID_PYCODESTYLE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_pycodestyle_error.py')
INVALID_PYTHON3_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_python_three.py')
INVALID_DOCSTRING_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_docstring.py')
INVALID_JOBS_ONE_OFF_FILEPATHS = [
'scripts/linters/test_files/invalid_duplicate_jobs_one_off.py']
VALID_JOBS_ONE_OFF_FILEPATHS = [
'scripts/linters/test_files/valid_jobs_one_off.py']
INVALID_PROD_VALIDATION_JOBS_ONE_OFF_FILEPATHS = [
'scripts/linters/test_files/invalid_duplicate_prod_validation_jobs_one_off'
'.py', 'scripts/linters/test_files/invalid_prod_validation_jobs_one_off.py']
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
class PythonLintChecksManagerTests(test_utils.LinterTestBase):
"""Test for python linter."""
def test_unsorted_import_order(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[INVALID_IMPORT_FILEPATH]).check_import_order()
self.assert_same_list_elements([
'FAILED Import order check failed'], lint_task_report.get_report())
self.assertEqual('Import order', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_sorted_import_order(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[VALID_PY_FILEPATH]).check_import_order()
self.assertEqual(
['SUCCESS Import order check passed'],
lint_task_report.get_report())
self.assertEqual('Import order', lint_task_report.name)
self.assertFalse(lint_task_report.failed)
def test_all_jobs_are_listed_in_the_job_registry_file_with_duplicacy(self):
lint_task_report = python_linter.PythonLintChecksManager(
INVALID_JOBS_ONE_OFF_FILEPATHS + VALID_JOBS_ONE_OFF_FILEPATHS,
FILE_CACHE
).check_that_all_jobs_are_listed_in_the_job_registry_file()
self.assert_same_list_elements([
'Found one-off jobs with duplicate names: '
'CollectionMigrationOneOffJob'], lint_task_report.trimmed_messages)
self.assert_same_list_elements([
'Found one-off jobs not listed in jobs_registry file: '
'CollectionsMigrationOneOffJob'], lint_task_report.trimmed_messages)
self.assertEqual('Job registry', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_all_jobs_are_listed_in_the_job_registry_file_with_success(self):
lint_task_report = python_linter.PythonLintChecksManager(
VALID_JOBS_ONE_OFF_FILEPATHS, FILE_CACHE
).check_that_all_jobs_are_listed_in_the_job_registry_file()
self.assertEqual(
['SUCCESS Job registry check passed'],
lint_task_report.get_report())
self.assertEqual('Job registry', lint_task_report.name)
self.assertFalse(lint_task_report.failed)
def test_jobs_are_listed_in_job_registry_file_with_duplicate_prod_job(self):
lint_task_report = python_linter.PythonLintChecksManager(
INVALID_PROD_VALIDATION_JOBS_ONE_OFF_FILEPATHS, FILE_CACHE
).check_that_all_jobs_are_listed_in_the_job_registry_file()
self.assert_same_list_elements([
'Found validation jobs with duplicate names: '
'PendingDeletionRequestModelAuditOneOffJob'
], lint_task_report.trimmed_messages)
self.assert_same_list_elements([
'Found validation jobs not listed in jobs_registry file: '
'PendingDeletionRequestModelAuditOneOffJobs'
], lint_task_report.trimmed_messages)
self.assertEqual('Job registry', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_custom_linter_with_test_only_in_non_test_file(self):
lint_task_report = python_linter.PythonLintChecksManager(
[INVALID_TEST_ONLY_FILEPATH], FILE_CACHE).check_non_test_files()
self.assert_same_list_elements([
'Line 35: Please do not use \'test_only\' in the non-test '
'file.'], lint_task_report.trimmed_messages)
self.assertEqual('Function definition', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_custom_linter_with_test_function_in_test_file(self):
lint_task_report = python_linter.PythonLintChecksManager(
[VALID_TEST_FILEPATH], FILE_CACHE).check_non_test_files()
self.assertEqual(
['SUCCESS Function definition check passed'],
lint_task_report.get_report())
self.assertEqual('Function definition', lint_task_report.name)
self.assertFalse(lint_task_report.failed)
def test_valid_file_with_pylint(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[VALID_PY_FILEPATH]).lint_py_files()
self.assertEqual(
['SUCCESS Pylint check passed'], lint_task_report.get_report())
self.assertEqual('Pylint', lint_task_report.name)
self.assertFalse(lint_task_report.failed)
def test_invalid_file_with_pylint_error(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[INVALID_DOCSTRING_FILEPATH]).lint_py_files()
self.assert_same_list_elements(
['W: 27, 0: Period is not used at the end of the docstring.'],
lint_task_report.trimmed_messages)
self.assertEqual('Pylint', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_get_trimmed_error_output(self):
lint_messages = [
'************* Module oppia.scripts.linters.test_files.invalid_'
'docstring', '\n',
'W: 27, 0: Period is not used at the end of the docstring. '
'(no-period-used)', '\n', '\n',
'---------------------------------------------------'
'---------------', '\n',
'Your code has been rated at 8.75/10 '
'(previous run: 8.75/10, +0.00)', u'\n', u'\n']
trimmed_messages = python_linter.ThirdPartyPythonLintChecksManager(
[INVALID_DOCSTRING_FILEPATH]).get_trimmed_error_output(
lint_messages)
self.assertEqual(
trimmed_messages,
'************* Module oppia.scripts.linters.test_files.'
'invalid_docstring\n\n\nW: 27, 0: Period is not used at '
'the end of the docstring. \n\n\n\n\n')
def test_python_utils_file_with_no_files(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[PYTHON_UTILS_FILEPATH]
).lint_py_files_for_python3_compatibility()
self.assert_same_list_elements([
'There are no Python files to lint for Python 3 '
'compatibility.'], lint_task_report[0].get_report())
self.assertEqual(
'Pylint for Python 3 compatibility', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_for_python_three_incompatibility(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[INVALID_PYTHON3_FILEPATH]
).lint_py_files_for_python3_compatibility()
self.assert_same_list_elements(
['W: 21, 0: import missing `from __future__ import '
'absolute_import` (no-absolute-import)'],
lint_task_report.get_report())
self.assertEqual(
'Pylint for Python 3 compatibility', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_custom_linter_with_no_files(self):
lint_task_report = python_linter.PythonLintChecksManager(
[], FILE_CACHE).perform_all_lint_checks()
self.assert_same_list_elements(
['There are no Python files to lint.'],
lint_task_report[0].get_report())
self.assertEqual('Python lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_third_party_linter_with_no_files(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[]).perform_all_lint_checks()
self.assert_same_list_elements(
['There are no Python files to lint.'],
lint_task_report[0].get_report())
self.assertEqual('Python lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_third_party_perform_all_lint_checks(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[INVALID_PYCODESTYLE_FILEPATH]).perform_all_lint_checks()
self.assertTrue(isinstance(lint_task_report, list))
def test_custom_perform_all_lint_checks(self):
lint_task_report = python_linter.PythonLintChecksManager(
[INVALID_PYCODESTYLE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
self.assertTrue(isinstance(lint_task_report, list))
def test_pycodestyle_with_error_message(self):
lint_task_report = python_linter.ThirdPartyPythonLintChecksManager(
[INVALID_PYCODESTYLE_FILEPATH]).lint_py_files()
self.assert_same_list_elements(
['27:1: E302 expected 2 blank lines, found 1'],
lint_task_report.trimmed_messages)
self.assertEqual('Pylint', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_get_linters_with_success(self):
custom_linter, third_party_linter = python_linter.get_linters(
[VALID_PY_FILEPATH], FILE_CACHE)
self.assertTrue(
isinstance(custom_linter, python_linter.PythonLintChecksManager))
self.assertTrue(
isinstance(
third_party_linter,
python_linter.ThirdPartyPythonLintChecksManager))
|
|
#!/usr/bin/env python
#
# Co-ordinates and utility functions for simulating
# Whitelees windfarm near Ayr, Scotland
#
# Copyright (c) 2017 DevicePilot Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import device, sim
import wind
WPerTurbine = 1600000 # Whitelees produces about 500MW peak from about 300 turbines
cutInSpeed = 3.5 # Wind has to be at least this strong (m/s) for turbine to turn
fullPowerSpeed = 14
cutOutSpeed = 25
windScaling = 77 # Turn normalised speed into m/s (77m/s is the max ever wind speed seen in the UK - so far!)
def belowCutIn(windSpeed):
return windSpeed < cutInSpeed
def aboveCutOut(windSpeed):
return windSpeed > cutOutSpeed
def windspeedToPower(windSpeed):
if belowCutIn(windSpeed) or aboveCutOut(windSpeed):
return 0
proportion = (windSpeed-cutInSpeed)/(fullPowerSpeed-cutInSpeed)
proportion = min(1.0, proportion)
return proportion * WPerTurbine
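# A hand-worked check of the power curve above: below the 3.5 m/s cut-in and
# above the 25 m/s cut-out the turbine produces nothing; at 8.75 m/s the
# proportion is (8.75 - 3.5) / (14 - 3.5) = 0.5, i.e. 800 kW; at or above
# 14 m/s the output is clamped to the full 1.6 MW per turbine.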
def windAndPower(epochSecs, hashObject=0):
"""hashObject can be anything, to add a bit of uniqueness to the reading a bit unique, e.g. device ID or location"""
timeDither = abs(hash(hashObject)) % 300 # Add up to 5 minutes time dither
strengthDither = 0.8 + 0.2 * (abs(hash(hashObject)) % 1000) / 1000.0 # Dither strength by 20%
windSpeed = wind.windStrength(epochSecs + timeDither) * windScaling * strengthDither
windSpeed = abs(windSpeed)
return windSpeed, windspeedToPower(windSpeed)
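# Note that the dither above is deterministic per hashObject: the same turbine
# always sees the same time offset (0-299 s) and strength factor (0.8-1.0), so
# readings differ between devices but are reproducible for any one device.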
def deviceSetup():
"""Set up devices."""
for i in range(len(turbineCoordinates)):
(longitude, latitude, _) = turbineCoordinates[i]
props = {
"$id" : "-".join([format(random.randrange(0,255),'02x') for _ in range(6)]), # A 6-byte MAC address 01-23-45-67-89-ab
"label" : "Turbine "+str(i+1),
"icon" : "turbine",
"is_demo_device" : True,
"longitude" : longitude,
"latitude" : latitude,
"parent" : "Gateway "+str(1 + int(i / 60)) # 6 gateways for 319 devices
}
d = device.device(props, autoTick=False)
sim.injectEventDelta(sim.hours(1), tickDeviceHourly, d)
# Set up gateways
for i in range(len(gatewayCoordinates)):
(longitude, latitude, commsrel) = gatewayCoordinates[i]
props = {
"$id" : "-".join([format(random.randrange(0,255),'02x') for _ in range(6)]), # A 6-byte MAC address 01-23-45-67-89-ab
"label" : "Gateway "+str(i+1),
"icon" : "cloud",
"is_demo_device" : True,
"longitude" : longitude,
"latitude" : latitude
}
d = device.device(props, autoTick=False)
d.setCommsReliability(sim.hours(24), commsrel)
sim.injectEventDelta(sim.hours(1), tickGatewayHourly, d)
def tickDeviceHourly(d):
"""Propagate this device's gateway comms state to this device (i.e. if gateway is offline, then this device is offline)."""
g = d.getProperty("parent")
commsOK = device.getDeviceByProperty("label",g).getCommsOK()
d.setCommsOK(commsOK)
# Update performance stats
    (windSpeed, power) = windAndPower(sim.getTime(), d)  # avoid shadowing the wind module
    props = {
        "windspeed" : windSpeed,
        "outputWatts" : power
    }
d.setProperties(props)
sim.injectEventDelta(sim.hours(1), tickDeviceHourly, d)
def tickGatewayHourly(d):
d.setProperties({ "heartbeat" : 1 })
sim.injectEventDelta(sim.hours(1), tickGatewayHourly, d)
if __name__ == "__main__":
minWind = 1000000
maxWind = 0
minPower = 1000000
maxPower = 0
for time in range(0,60*60*24*365,300):
w, p = windAndPower(time+10000000)
print w,p
minWind = min(minWind, w)
maxWind = max(maxWind, w)
minPower = min(minPower, p)
maxPower = max(maxPower, p)
print "Wind:",minWind, maxWind," Power:",minPower,maxPower
LWM2M_mapping = { # Map Synth property names to LWM2M objectID/_/resourceID magic number pairs (see LWM2M.py)
"$id" : (3,2), # Device/Serial Number
"firmware" : (3,3), # Device/Firmware version
"latitude" : (6,0), # Location/Latitude
"longitude" : (6,1) # Location/Longitude
}
gatewayCoordinates = [ # (longitude, latitude, comms_reliability)
(-4.32,55.66,0.80),
(-4.34,55.72,1.00),
(-4.33,55.67,0.99),
(-4.25,55.68,0.98),
(-4.23,55.69,0.99),
(-4.39,55.75,1.00)
]
turbineCoordinates = [ # (longitude, latitude, altitude=n/a)
(-4.363121360910497,55.7211469299052,0), # 1
(-4.325124512049068,55.63628343371141,0),
(-4.34040035282158,55.64623066519115,0),
(-4.331758314493804,55.63384845761354,0),
(-4.334820757335801,55.64199483214338,0),
(-4.326215788358425,55.64757065882198,0),
(-4.318111669402656,55.63653529058389,0),
(-4.321499782843911,55.66399671430121,0),
(-4.328419972015841,55.64357537933062,0),
(-4.323549468858507,55.63933906760848,0), # 10
(-4.340580345213691,55.63556282031495,0),
(-4.33763896239485,55.64404416154925,0),
(-4.343196971428442,55.64071081064638,0),
(-4.344112501030253,55.64240073503275,0),
(-4.330641162628069,55.63876624713618,0),
(-4.358868524187752,55.65184245973983,0),
(-4.353158973449944,55.64541061471368,0),
(-4.349524513556053,55.64094089519216,0),
(-4.334714875264101,55.6484662052382,0),
(-4.332175616264743,55.6463309101435,0), # 20
(-4.331456792529428,55.65527932603607,0),
(-4.354499014349899,55.64172015349701,0),
(-4.357106941081657,55.64670592971693,0),
(-4.34784811060004,55.64713179251536,0),
(-4.346978508700301,55.64442959931017,0),
(-4.329996483151517,55.65115252636912,0),
(-4.34178291161582,55.64931312750933,0),
(-4.343237305897337,55.65212096361334,0),
(-4.337686889504441,55.654020178972,0),
(-4.354988860396974,55.65005434708387,0), # 30
(-4.349355315463566,55.64994779486451,0),
(-4.336240166990583,55.65126467143753,0),
(-4.325176661457769,55.65419808189949,0),
(-4.362806379948674,55.65318297702969,0),
(-4.324645595098421,55.73358510763013,0),
(-4.372521926986205,55.65380453332674,0),
(-4.360938953701546,55.64865650447074,0),
(-4.365281674019748,55.64994687738733,0),
(-4.300806412512369,55.67633893699318,0),
(-4.332902230910235,55.65702016019904,0), # 40
(-4.319679483579879,55.66028005095824,0),
(-4.369783780006542,55.6513518152711,0),
(-4.316279345979695,55.65645634325297,0),
(-4.327768620844661,55.65921899969445,0),
(-4.286801909665952,55.6834076790227,0),
(-4.314989865227171,55.66725967035981,0),
(-4.321919207798368,55.66918014420876,0),
(-4.309085048049434,55.67326065432814,0),
(-4.300562341507012,55.67189657355227,0),
(-4.316905348643991,55.67167144728392,0), # 50
(-4.324238925766931,55.67403324636882,0),
(-4.330837580642802,55.66918144216938,0),
(-4.312606333127268,55.66268155249704,0),
(-4.307525445110271,55.66884149673891,0),
(-4.324276871078744,55.67859397986731,0),
(-4.316383113559104,55.68058304202123,0),
(-4.307244287824398,55.68742615925238,0),
(-4.308605504407259,55.67852595050091,0),
(-4.343167012355201,55.70923950447609,0),
(-4.290828203060979,55.67867067306209,0), # 60
(-4.317405771083868,55.67625078008275,0),
(-4.322378859352048,55.68304198814701,0),
(-4.292238841800807,55.68223927147407,0),
(-4.301760980673874,55.68478836905504,0),
(-4.303133807530628,55.69548338199327,0),
(-4.295707386615594,55.68713161141309,0),
(-4.306214300451706,55.69194826636785,0),
(-4.308462904392123,55.69791052437527,0),
(-4.321643966727303,55.71884352603778,0),
(-4.298071205463256,55.69305566690261,0), # 70
(-4.301434406768906,55.68974083211408,0),
(-4.31126447420707,55.69437314218654,0),
(-4.316133727894995,55.69644296554928,0),
(-4.317506707455449,55.69205983220447,0),
(-4.351272691888886,55.72011460031313,0),
(-4.321049185968249,55.69878895271878,0),
(-4.32103650772933,55.70819854781317,0),
(-4.333923846483692,55.70477883711104,0),
(-4.338687418247387,55.70208900248269,0),
(-4.322945255831305,55.6947406374407,0), # 80
(-4.292294013859869,55.69739326500518,0),
(-4.328905386723699,55.70740284465175,0),
(-4.354439321879812,55.70497472476756,0),
(-4.326584339991807,55.69134040764144,0),
(-4.313286893423526,55.70006415223181,0),
(-4.322619811234674,55.70448221376429,0),
(-4.344027611538417,55.70454584222289,0),
(-4.349260553064218,55.70694224075432,0),
(-4.338407709507527,55.7070252161386,0),
(-4.338097431815557,55.71175523744516,0), # 90
(-4.332975922398346,55.71364042991178,0),
(-4.352309479049401,55.71573652683913,0),
(-4.345438279777328,55.71869070095648,0),
(-4.316594082484384,55.71657374578989,0),
(-4.326979592402878,55.71602924612819,0),
(-4.321877673227975,55.71353653319889,0),
(-4.334219898243211,55.73633317077834,0),
(-4.402541307103492,55.70802962740399,0),
(-4.356868388044722,55.72062717083011,0),
(-4.323121815060842,55.74076045698513,0), # 100
(-4.359680317538154,55.71717771797036,0),
(-4.362005188339778,55.72800640645763,0),
(-4.352750404581887,55.72869309973905,0),
(-4.358680993756209,55.72536539554598,0),
(-4.369253259885118,55.72620714326268,0),
(-4.341862080625387,55.72853450909831,0),
(-4.343256653932281,55.72292035409916,0),
(-4.376871960521568,55.72834171513098,0),
(-4.329416304122997,55.73575707432255,0),
(-4.431055817057388,55.71590824669656,0), # 110
(-4.460630394087662,55.74754948151104,0),
(-4.322352904936211,55.73764930142646,0),
(-4.429359609430882,55.7103079473376,0),
(-4.428245770779979,55.70533916706044,0),
(-4.424215059523975,55.70609041162533,0),
(-4.427528512132573,55.70800231896799,0),
(-4.41999912039641,55.7134795660136,0),
(-4.420094555941716,55.70937529089323,0),
(-4.345042607304233,55.66986293136637,0),
(-4.430702602217651,55.71206080649413,0), # 120
(-4.422955291097718,55.71092474690213,0),
(-4.416717205791009,55.71204583632342,0),
(-4.404494625698764,55.71002467965974,0),
(-4.425282538438717,55.71270931791784,0),
(-4.413730448884222,55.71838333932617,0),
(-4.418296286402129,55.71670434835421,0),
(-4.409676126800285,55.71061046912151,0),
(-4.41072741311089,55.7166838835935,0),
(-4.413707613943743,55.70822816544775,0),
(-4.338921858227266,55.67776543578655,0), # 130
(-4.415229561642552,55.71515467254132,0),
(-4.403832923887629,55.71271190707095,0),
(-4.404030551337824,55.71634290485998,0),
(-4.40881143657876,55.71965610335332,0),
(-4.359941154993049,55.69721904078376,0),
(-4.408995303175518,55.70773637505441,0),
(-4.305562865532789,55.64973511614426,0),
(-4.34741418677286,55.67889329945073,0),
(-4.407463732850994,55.71375071259282,0),
(-4.367753324635713,55.69827112358328,0), # 140
(-4.354909932301924,55.67821936385427,0),
(-4.346337423076104,55.67449619144116,0),
(-4.273538549311652,55.65415950011722,0),
(-4.33866249785126,55.67379359648002,0),
(-4.354470085058036,55.67373696973111,0),
(-4.350699410392992,55.68811255863782,0),
(-4.331979214450961,55.68116171226471,0),
(-4.339487120597357,55.68112741496595,0),
(-4.33935224673362,55.6871187239616,0),
(-4.331974285049753,55.67711854869774,0), # 150
(-4.357040107151736,55.69070530469371,0),
(-4.337913528830077,55.66941954093448,0),
(-4.332658099631602,55.67265949485367,0),
(-4.354819816501542,55.69474761371659,0),
(-4.274720976546487,55.6620930037325,0),
(-4.350414708485762,55.6979009688319,0),
(-4.368957568659204,55.70266029144995,0),
(-4.374443148832048,55.69643852191501,0),
(-4.376339444977678,55.70032045009152,0),
(-4.361606643273688,55.70382222681439,0), # 160
(-4.315498173958447,55.70574434926295,0),
(-4.356722286792546,55.70095561649268,0),
(-4.31656693170369,55.71121966144834,0),
(-4.31101506736516,55.7088129506337,0),
(-4.359027354382439,55.70779729098561,0),
(-4.305964393628631,55.71165214425287,0),
(-4.300632235189404,55.70894289266273,0),
(-4.308164387700937,55.70223404254585,0),
(-4.294321947109266,55.70595627000421,0),
(-4.306373043157517,55.70646202849704,0), # 170
(-4.303421668416746,55.70014833051221,0),
(-4.296715751227104,55.70166595585352,0),
(-4.300933770463175,55.70401125358564,0),
(-4.276277811409942,55.65815312602803,0),
(-4.320136176730926,55.6446593947726,0),
(-4.244330783448637,55.68929271103754,0),
(-4.317882741090888,55.64751405202089,0),
(-4.290090665339883,55.65302214801083,0),
(-4.283146082269212,55.65523510049076,0),
(-4.282175803371585,55.65995400636983,0), # 180
(-4.289188788558207,55.65753741007739,0),
(-4.313783314945495,55.65152341452699,0),
(-4.283709503881648,55.66851206448065,0),
(-4.286961920936276,55.66268536535539,0),
(-4.29370094992545,55.65974093839877,0),
(-4.279375969199268,55.65099747887511,0),
(-4.322104391064729,55.65072772843258,0),
(-4.279752977330502,55.66449604661603,0),
(-4.308625988699681,55.65797618545892,0),
(-4.296443002986161,55.65530715333979,0), # 190
(-4.304963466020904,55.65417527215967,0),
(-4.301844651116569,55.65887229630567,0),
(-4.299376497261743,55.66703640920007,0),
(-4.267376492736403,55.68128495129432,0),
(-4.272893337070217,55.66596376945675,0),
(-4.239573291758958,55.67175732178573,0),
(-4.293189911597665,55.67001490109982,0),
(-4.29098232463772,55.6655743454707,0),
(-4.285810361682831,55.67216919713137,0),
(-4.273414477299648,55.6833606813654,0), # 200
(-4.283854854930515,55.67641372864107,0),
(-4.29222419329391,55.6744791173613,0),
(-4.297590102908461,55.66303349485084,0),
(-4.255430405725338,55.67156269617321,0),
(-4.304872165522032,55.66371908888306,0),
(-4.255236980444196,55.68287418998853,0),
(-4.268087351226235,55.68520673746328,0),
(-4.256642136511059,55.69184301095129,0),
(-4.277489847948925,55.69178494448696,0),
(-4.250452797566746,55.69032112793005,0), # 210
(-4.25107146153687,55.68608550146703,0),
(-4.248393705359845,55.68110665227564,0),
(-4.256854940884761,55.68711452811865,0),
(-4.242058752905823,55.66870379162324,0),
(-4.244426805990836,55.68477570163954,0),
(-4.263413167545746,55.68869374364406,0),
(-4.23331131262775,55.67417603250807,0),
(-4.248081056639692,55.67340194574314,0),
(-4.242656994081491,55.68038433858728,0),
(-4.243077193289789,55.67611944906261,0), # 220
(-4.23538753124552,55.66729889952811,0),
(-4.21933010043259,55.68776563984124,0),
(-4.231489240263063,55.67042587656907,0),
(-4.248609178451074,55.66993980148945,0),
(-4.208425558415376,55.68991587344718,0),
(-4.238845687658338,55.68365125172548,0),
(-4.224952525651908,55.66901759655376,0),
(-4.229614131956996,55.67760401177974,0),
(-4.226044603699032,55.68902093929934,0),
(-4.243083304730683,55.66426284625015,0), # 230
(-4.232218869666907,55.68205448170167,0),
(-4.226191428419204,55.68107967227317,0),
(-4.232478493431295,55.68675947471042,0),
(-4.244089954299625,55.66032802156845,0),
(-4.226517466439925,55.68558650349512,0),
(-4.238298915908631,55.68801682391206,0),
(-4.236035427001895,55.66308696477737,0),
(-4.213060275634676,55.68277250140145,0),
(-4.219248372324669,55.68398471552165,0),
(-4.228439738364912,55.66611878712119,0), # 240
(-4.2312945436259,55.6987653123366,0),
(-4.237340508218681,55.69136670590497,0),
(-4.232091039355971,55.69046696923013,0),
(-4.237219347342089,55.65886861530007,0),
(-4.229736139041592,55.66192241838358,0),
(-4.235540418610798,55.67871743621566,0),
(-4.236581778607982,55.6957610849506,0),
(-4.216574109755218,55.69595982038361,0),
(-4.230012808400844,55.69468923566223,0),
(-4.254125402024766,55.67495688989998,0), # 250
(-4.217456243575838,55.70000790881195,0),
(-4.224071213197863,55.69313943700448,0),
(-4.24245250189048,55.69704972669223,0),
(-4.215560435675493,55.69157013987852,0),
(-4.212603463660028,55.68640781359535,0),
(-4.198251475975497,55.67955324183171,0),
(-4.201803581117422,55.6890584228538,0),
(-4.2040563964668,55.69727766931506,0),
(-4.184720409181722,55.69185591506247,0),
(-4.20289735874643,55.7009620031824,0), # 260
(-4.205472477397786,55.68501667818862,0),
(-4.224875713779985,55.69743769520251,0),
(-4.196480230279463,55.69170086768384,0),
(-4.210015297014848,55.69469363584263,0),
(-4.208803261519909,55.70205072693525,0),
(-4.203228921002571,55.69315462398859,0),
(-4.190153197204424,55.69044656510593,0),
(-4.196727690887129,55.69599223885797,0),
(-4.202721579231886,55.65930595119775,0),
(-4.196087834471887,55.68316044093155,0), # 270
(-4.195258029415038,55.6875786126831,0),
(-4.207778371516494,55.66122803945077,0),
(-4.189250295330257,55.68613567629386,0),
(-4.189326297588964,55.68177500429783,0),
(-4.191934147486243,55.67856427171471,0),
(-4.212137502721685,55.66395249740276,0),
(-4.203230661150826,55.65521159154917,0),
(-4.209882066291634,55.6568760142949,0),
(-4.211651005692263,55.65254277970185,0),
(-4.208990560375963,55.64567021867513,0), # 280
(-4.218019351890675,55.65351927691016,0),
(-4.196991671011499,55.65511812528948,0),
(-4.221882901989803,55.65603009429265,0),
(-4.233880023460737,55.65359811813716,0),
(-4.240114385755048,55.65483735044374,0),
(-4.230733167510167,55.65741640582688,0),
(-4.227311474707011,55.65236054554929,0),
(-4.217596023533986,55.64949184032876,0),
(-4.219455204526025,55.64584388269022,0),
(-4.21426153555764,55.64615466851532,0), # 290
(-4.207958760820968,55.65090340826011,0),
(-4.199203268751192,55.64798409679859,0),
(-4.204607283597063,55.64847308932882,0),
(-4.21337302367616,55.64276309420012,0),
(-4.199489708796484,55.65104913254901,0),
(-4.218078925646913,55.64312185927966,0),
(-4.208248878059231,55.6425397885873,0),
(-4.203539931879473,55.64469243172059,0),
(-4.4375576247079,55.75008351952263,0),
(-4.446161232455247,55.7350916539374,0), # 300
(-4.4151232031356,55.74699837146967,0),
(-4.419858334024259,55.74515444062921,0),
(-4.424796309327304,55.74107821011865,0),
(-4.426051081393214,55.73858696994036,0),
(-4.429878063562138,55.73667938847689,0),
(-4.434455098599507,55.73621509668669,0),
(-4.43906958469456,55.73673309457654,0),
(-4.40878792229512,55.75918272145795,0),
(-4.402892512056712,55.75897916879914,0),
(-4.399112399475279,55.75645302270934,0), # 310
(-4.386578691339345,55.7518198319904,0),
(-4.388896068318445,55.75354490068294,0),
(-4.385631974420235,55.75672734246987,0),
(-4.400659689841982,55.73885970355056,0),
(-4.395448383929891,55.73862017611222,0),
(-4.400411652692222,55.7366073788147,0),
(-4.274294300633835,55.71287577535518,0),
(-4.275549769323225,55.716385928269,0),
(-4.384850698380353,55.73566016051677,0) # 319
]
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
from . import GAuth
from . import GCache
except ImportError:
import GAuth
import GCache
import logging
import os
import apiclient
from apiclient.http import MediaFileUpload
from apiclient.http import MediaIoBaseDownload
import time
DIR_MIME = 'application/vnd.google-apps.folder'
OAUTH2_SCOPE = 'https://www.googleapis.com/auth/drive'
# Number of bytes to send/receive in each request.
CHUNKSIZE = 2 * 1024 * 1024
# Mimetype to use if one can't be guessed from the file extension.
DEFAULT_MIMETYPE = 'application/octet-stream'
class GDrive(GAuth.GAuth):
def __init__(self, oauth2json = None, oauth2storage = None, scope = OAUTH2_SCOPE):
GAuth.GAuth.__init__(self, oauth2json, oauth2storage, scope)
self.cache = GCache.GCache()
logging.debug('GDrive object created')
def get_id_from_cache(self, name, parent_folder = 'root'):
        # Check whether the object already exists in the cache
return self.cache[(name, parent_folder)]
def get_id_from_gdrive(self, name, parent_folder = 'root'):
logging.info('Fetching metadata for %s in %s' % (name, parent_folder))
q = "name='%s' and '%s' in parents" % (name, parent_folder)
logging.debug("Google Drive query: %s" % q)
param = {
'q' : q,
'fields' : 'files(id)',
}
        response = self.service.files().list(**param).execute()
        oid = response['files'][0]['id']
        logging.debug('Data fetched: %s', response['files'])
        # Save to the cache
        self.cache[oid] = (name, parent_folder)
        return oid
def get_id(self, name, parent_folder = 'root'):
# Try if it exists in cache
try:
return self.get_id_from_cache(name, parent_folder)
except:
pass
# Try if it exists in GDrive
try:
return self.get_id_from_gdrive(name, parent_folder)
except IndexError:
raise KeyError('"%s" in "%s"' % (name, parent_folder))
def is_dir(self, object_id):
logging.debug('Fetching metadata for %s' % (object_id,))
response = self.service.files().get(fileId = object_id, fields = 'mimeType').execute()
logging.debug('mimeType of the object: %s' % response.get('mimeType'))
return response.get('mimeType') == DIR_MIME
def mkdir(self, folder_title, parent_folder = 'root'):
try:
return self.get_id(folder_title, parent_folder)
except:
pass
# Looks like there is no such directory yet, so let's create it
body = {
"name": folder_title,
"parents": [{"id": parent_folder}],
"mimeType": DIR_MIME,
}
directory = self.service.files().create(body = body).execute()
did = directory['id']
logging.debug('Directory %s in %s has been created as ID %s' % (folder_title, parent_folder, did))
self.cache[did] = (folder_title, parent_folder)
return did
def rm(self, object_id):
logging.info("Removing an object with id %s" % object_id)
try:
self.service.files().delete(fileId=object_id).execute()
logging.debug("Object with id %s has been sucesfully removed" % object_id)
        except Exception as e:
logging.error("Removal of object id %s has failed: %s" % (object_id, str(e)))
raise
try:
del self.cache[object_id]
except:
pass
def ls(self, folder_id = 'root'):
logging.info('Fetching metadata for %s' % (folder_id,))
q = "'%s' in parents" % (folder_id,)
logging.debug("Google Drive query: %s" % q)
param = {
'q' : q,
'fields' : 'files(id,name)',
}
response = self.service.files().list(**param).execute()
ids = list()
for o in response['files']:
# Save to the cache
self.cache[o['id']] = (o['name'], folder_id)
# Append the id to the final list
ids.append(o['id'])
logging.debug('Fetched: %s objects', len(ids))
        return ids
def upload(self, filename, gdrivename = None, parent_folder = 'root'):
logging.debug('Going to upload file to GDrive. filename=%s , gdrivename=%s , parent_folder=%s' % (filename, gdrivename, parent_folder))
# Convert the name of the file on GDrive in case it is not provided
if gdrivename is None or gdrivename == '':
gdrivename = filename.split('/')[-1]
# Check whether the file does not already exists
try:
self.get_id(gdrivename, parent_folder)
except:
pass
else:
logging.error("The file to upload %s already exists" % gdrivename)
raise FileExistsError(gdrivename)
# Prepare for the file upload
logging.debug("Creating the media object for uploading from %s" % filename)
media = MediaFileUpload(filename, chunksize = CHUNKSIZE, resumable = True)
if not media.mimetype():
logging.debug("MIME type of the file has not been recognized, using the default %s" % DEFAULT_MIMETYPE)
            media = MediaFileUpload(filename, mimetype = DEFAULT_MIMETYPE, chunksize = CHUNKSIZE, resumable = True)
body = {
'name': gdrivename,
#'parents': [{"id": parent_folder}],
'parents': [parent_folder],
}
logging.debug('Starting upload of the %s file as %s' % (filename, gdrivename))
request = self.service.files().create(body = body, media_body = media, fields='id')
retry = 5
while retry > 0:
try:
response = None
while response is None:
status, response = request.next_chunk()
if status:
logging.info("Uploaded %d%%." % int(status.progress() * 100))
logging.info("Upload has been completed")
# No need for a retry
retry = -1
except apiclient.errors.HttpError as e:
if e.resp.status in [404]:
# Start the upload all over again.
request = self.service.files().create(body = body, media_body = media, fields='id')
elif e.resp.status in [500, 502, 503, 504]:
                    # Transient server error: call next_chunk() again after a short delay.
logging.warning('Upload of a chunk has failed, retrying ...')
retry -= 1
time.sleep(3)
else:
# Do not retry. Log the error and fail.
logging.error('The upload has failed: %s' % str(e))
raise
if retry == 0:
logging.error('The upload has failed.')
raise ConnectionError
fid = response.get('id')
self.cache[fid] = (gdrivename, parent_folder)
return fid
def move(self, object_id, folder_id = 'root'):
logging.debug("Copying an object with id %s to a folder with id %s" % (object_id, folder_id))
# Retrieve the existing parents to remove
f = self.service.files().get(fileId = object_id, fields = 'parents').execute()
previous_parents = ",".join(f.get('parents'))
# Move the file to the new folder
f = self.service.files().update(fileId = object_id,
addParents = folder_id,
removeParents = previous_parents,
fields = 'id').execute()
logging.info("Object with id %s has been sucesfully copied to a folder with id %s" % (object_id, folder_id))
return f.get('id')
def download(self, filename, object_id):
logging.debug('Starting download of object %s to %s' % (object_id, filename))
with open(filename, 'wb') as fd:
request = self.service.files().get_media(fileId = object_id)
downloader = MediaIoBaseDownload(fd, request, chunksize = CHUNKSIZE)
done = False
while done is False:
status, done = downloader.next_chunk()
logging.info("Download %d%%." % int(status.progress() * 100))
logging.info('Object %s has been downloaded as %s' % (object_id, filename))
def walk(self, path):
logging.debug('Walking through %s' % path)
dirs = path.split('/')
dirs = [d for d in dirs if d != '']
if len(dirs) == 0:
return 'root'
dirs = ['root'] + dirs
index = 1
oid = None
while index < len(dirs):
logging.debug('Diving into %s/%s' % (dirs[index - 1], dirs[index]))
try:
oid = self.get_id(dirs[index], dirs[index - 1])
except KeyError as e:
logging.info('The artefact has not been found')
return None
dirs[index] = oid
index += 1
logging.info('The artefact has been found as OID=%s' % oid)
return oid
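# A minimal usage sketch for GDrive.walk() (hypothetical folder names): each
# path component is resolved to an id against its parent, starting from 'root'.
#   gd.walk('/')            -> 'root'
#   gd.walk('/backups/db')  -> the id of 'db' inside 'backups', or None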
if __name__ == "__main__":
logging.basicConfig(level = logging.DEBUG, format = '%(asctime)s %(levelname)s[%(module)s] %(message)s')
gd = GDrive(oauth2json = '/home/jkurik/.gp.json', oauth2storage = '/home/jkurik/.gp')
gd.auth()
dname = "Nazdarek ke smazani"
did = gd.mkdir(dname)
#print("ID of a new object:", gd.get_id_from_gdrive(dname))
#print("ID of a new object in a cache:", gd.get_id_from_cache(dname))
gd.rm(did)
gd.ls()
#fid = gd.upload('/home/jkurik/the.hacker.playbook.practical.guide.to.penetration.testing.pdf')
#fid = gd.upload('/home/jkurik/kalendar2016v3.xls')
#did = gd.mkdir('tmp')
#gd.is_dir(did)
#gd.move(fid, did)
#gd.download('/tmp/xxx', fid)
#gd.rm(fid)
#gd.rm(did)
|
|
# Copyright 2014 Open Data Science Initiative and other authors. See AUTHORS.txt
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import os
import sys
import numpy as np
import math
class vertex:
    def __init__(self, name, id, parents=None, children=None, meta=None):
        # Use None defaults: mutable default arguments would be shared
        # between every vertex created without explicit lists.
        self.name = name
        self.id = id
        self.parents = parents if parents is not None else []
        self.children = children if children is not None else []
        self.meta = meta if meta is not None else {}
def __str__(self):
return self.name + "(" + str(self.id) + ")."
class tree:
def __init__(self):
self.vertices = []
self.vertices.append(vertex(name="root", id=0))
def __str__(self):
index = self.find_root()
return self.branch_str(index)
def branch_str(self, index, indent=""):
out = indent + str(self.vertices[index]) + "\n"
for child in self.vertices[index].children:
out += self.branch_str(child, indent + " ")
return out
def find_children(self):
"""Take a tree and set the children according to the parents.
Takes a tree structure which lists the parents of each vertex
and computes the children for each vertex and places them in."""
for i in range(len(self.vertices)):
self.vertices[i].children = []
for i in range(len(self.vertices)):
for parent in self.vertices[i].parents:
if i not in self.vertices[parent].children:
self.vertices[parent].children.append(i)
def find_parents(self):
"""Take a tree and set the parents according to the children
Takes a tree structure which lists the children of each vertex
and computes the parents for each vertex and places them in."""
for i in range(len(self.vertices)):
self.vertices[i].parents = []
for i in range(len(self.vertices)):
for child in self.vertices[i].children:
if i not in self.vertices[child].parents:
self.vertices[child].parents.append(i)
def find_root(self):
"""Finds the index of the root node of the tree."""
self.find_parents()
index = 0
while len(self.vertices[index].parents) > 0:
index = self.vertices[index].parents[0]
return index
def get_index_by_id(self, id):
"""Give the index associated with a given vertex id."""
for i in range(len(self.vertices)):
if self.vertices[i].id == id:
return i
raise ValueError("Reverse look up of id failed.")
def get_index_by_name(self, name):
"""Give the index associated with a given vertex name.
:param name: the name of a vertex.
:type name: string
:rval index: index of the vertex.
:rtype int:
"""
for i in range(len(self.vertices)):
if self.vertices[i].name == name:
return i
raise ValueError("Reverse look up of name failed.")
def order_vertices(self):
"""Order vertices in the graph such that parents always have a lower index than children."""
ordered = False
        while not ordered:
for i in range(len(self.vertices)):
ordered = True
for parent in self.vertices[i].parents:
if parent > i:
ordered = False
self.swap_vertices(i, parent)
def swap_vertices(self, i, j):
"""
Swap two vertices in the tree structure array.
        swap_vertices swaps the location of two vertices in a tree structure
        array, updating parent and child indices in place.
        :param i: the index of the first vertex to be swapped.
        :param j: the index of the second vertex to be swapped.
"""
store_vertex_i = self.vertices[i]
store_vertex_j = self.vertices[j]
self.vertices[j] = store_vertex_i
self.vertices[i] = store_vertex_j
for k in range(len(self.vertices)):
for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
if i in swap_list:
swap_list[swap_list.index(i)] = -1
if j in swap_list:
swap_list[swap_list.index(j)] = i
if -1 in swap_list:
swap_list[swap_list.index(-1)] = j
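# A minimal sketch of how the tree pieces fit together (hypothetical vertices):
# list only the parents when building, then derive the children from them.
#   t = tree()
#   t.vertices.append(vertex(name="child", id=1, parents=[0]))
#   t.find_children()   # vertices[0].children becomes [1]
#   print(t)            # prints root(0). with child(1). indented beneath it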
def rotation_matrix(xangle, yangle, zangle, order="zxy", degrees=False):
"""Compute the rotation matrix for an angle in each direction. This
is a helper function for computing the rotation matrix for a given
set of angles in a given order.
:param xangle: rotation for x-axis.
:param yangle: rotation for y-axis.
:param zangle: rotation for z-axis.
:param order: the order for the rotations.
"""
if degrees:
xangle = math.radians(xangle)
yangle = math.radians(yangle)
zangle = math.radians(zangle)
# Here we assume we rotate z, then x then y.
c1 = math.cos(xangle) # The x angle
c2 = math.cos(yangle) # The y angle
c3 = math.cos(zangle) # the z angle
s1 = math.sin(xangle)
s2 = math.sin(yangle)
s3 = math.sin(zangle)
# see http://en.wikipedia.org/wiki/Rotation_matrix for
# additional info.
if order == "zxy":
rot_mat = np.array(
[
[c2 * c3 - s1 * s2 * s3, c2 * s3 + s1 * s2 * c3, -s2 * c1],
[-c1 * s3, c1 * c3, s1],
[s2 * c3 + c2 * s1 * s3, s2 * s3 - c2 * s1 * c3, c2 * c1],
]
)
else:
rot_mat = np.eye(3)
for i in range(len(order)):
if order[i] == "x":
rot_mat = np.dot(
np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]]), rot_mat
)
elif order[i] == "y":
rot_mat = np.dot(
np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]]), rot_mat
)
elif order[i] == "z":
rot_mat = np.dot(
np.array([[c3, s3, 0], [-s3, c3, 0], [0, 0, 1]]), rot_mat
)
return rot_mat
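# Hand-worked example of the default "zxy" branch above: with xangle = yangle
# = 0 and zangle = 90 degrees the matrix reduces to
# [[0, 1, 0], [-1, 0, 0], [0, 0, 1]], so with the row-vector convention used
# by the skeleton code below,
#   np.dot(np.array([1.0, 0.0, 0.0]), rotation_matrix(0, 0, 90, degrees=True))
# maps the x unit vector onto [0.0, 1.0, 0.0].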
# Motion capture data routines.
class skeleton(tree):
def __init__(self):
tree.__init__(self)
def connection_matrix(self):
connection = np.zeros((len(self.vertices), len(self.vertices)), dtype=bool)
for i in range(len(self.vertices)):
for j in range(len(self.vertices[i].children)):
connection[i, self.vertices[i].children[j]] = True
return connection
def to_xyz(self, channels):
raise NotImplementedError(
"this needs to be implemented to use the skeleton class"
)
def finalize(self):
"""After loading in a skeleton ensure parents are correct, vertex
orders are correct and rotation matrices are correct."""
self.find_parents()
self.order_vertices()
self.set_rotation_matrices()
def smooth_angle_channels(self, channels):
"""Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions."""
for vertex in self.vertices:
for col in vertex.meta["rot_ind"]:
if col:
for k in range(1, channels.shape[0]):
diff = channels[k, col] - channels[k - 1, col]
if abs(diff + 360.0) < abs(diff):
channels[k:, col] = channels[k:, col] + 360.0
elif abs(diff - 360.0) < abs(diff):
channels[k:, col] = channels[k:, col] - 360.0
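# Worked example for smooth_angle_channels above (hypothetical channel values):
# if a rotation channel jumps from 179 to -179 between frames, the raw diff is
# -358, but |diff + 360| = 2 is smaller, so 360 is added to every later frame
# and the channel passes smoothly through 181 instead of wrapping around.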
# class bvh_skeleton(skeleton):
# def __init__(self):
# skeleton.__init__(self)
# def to_xyz(self, channels):
class acclaim_skeleton(skeleton):
def __init__(self, file_name=None):
skeleton.__init__(self)
self.documentation = []
self.angle = "deg"
self.length = 1.0
self.mass = 1.0
self.type = "acclaim"
self.vertices[0] = vertex(
name="root",
id=0,
parents=[0],
children=[],
meta={
"orientation": [],
"axis": [0.0, 0.0, 0.0],
"axis_order": [],
"C": np.eye(3),
"Cinv": np.eye(3),
"channels": [],
"bodymass": [],
"confmass": [],
"order": [],
"rot_ind": [],
"pos_ind": [],
"limits": [],
"xyz": np.array([0.0, 0.0, 0.0]),
"rot": np.eye(3),
},
)
if file_name:
self.load_skel(file_name)
def to_xyz(self, channels):
rot_val = list(self.vertices[0].meta["orientation"])
for i in range(len(self.vertices[0].meta["rot_ind"])):
rind = self.vertices[0].meta["rot_ind"][i]
if rind != -1:
rot_val[i] += channels[rind]
self.vertices[0].meta["rot"] = rotation_matrix(
rot_val[0],
rot_val[1],
rot_val[2],
self.vertices[0].meta["axis_order"],
degrees=True,
)
# vertex based store of the xyz location
self.vertices[0].meta["xyz"] = list(self.vertices[0].meta["offset"])
for i in range(len(self.vertices[0].meta["pos_ind"])):
pind = self.vertices[0].meta["pos_ind"][i]
if pind != -1:
self.vertices[0].meta["xyz"][i] += channels[pind]
for i in range(len(self.vertices[0].children)):
ind = self.vertices[0].children[i]
self.get_child_xyz(ind, channels)
xyz = []
for vertex in self.vertices:
xyz.append(vertex.meta["xyz"])
return np.array(xyz)
def get_child_xyz(self, ind, channels):
parent = self.vertices[ind].parents[0]
children = self.vertices[ind].children
rot_val = np.zeros(3)
for j in range(len(self.vertices[ind].meta["rot_ind"])):
rind = self.vertices[ind].meta["rot_ind"][j]
if rind != -1:
rot_val[j] = channels[rind]
else:
rot_val[j] = 0
tdof = rotation_matrix(
rot_val[0],
rot_val[1],
rot_val[2],
self.vertices[ind].meta["order"],
degrees=True,
)
torient = rotation_matrix(
self.vertices[ind].meta["axis"][0],
self.vertices[ind].meta["axis"][1],
self.vertices[ind].meta["axis"][2],
self.vertices[ind].meta["axis_order"],
degrees=True,
)
torient_inv = rotation_matrix(
-self.vertices[ind].meta["axis"][0],
-self.vertices[ind].meta["axis"][1],
-self.vertices[ind].meta["axis"][2],
self.vertices[ind].meta["axis_order"][::-1],
degrees=True,
)
self.vertices[ind].meta["rot"] = np.dot(
np.dot(np.dot(torient_inv, tdof), torient),
self.vertices[parent].meta["rot"],
)
self.vertices[ind].meta["xyz"] = self.vertices[parent].meta["xyz"] + np.dot(
self.vertices[ind].meta["offset"], self.vertices[ind].meta["rot"]
)
for i in range(len(children)):
cind = children[i]
self.get_child_xyz(cind, channels)
def load_channels(self, file_name):
fid = open(file_name, "r")
channels = self.read_channels(fid)
fid.close()
return channels
def load_skel(self, file_name):
"""
Loads an ASF file into a skeleton structure.
:param file_name: The file name to load in.
"""
fid = open(file_name, "r")
self.read_skel(fid)
fid.close()
self.name = file_name
def read_bonedata(self, fid):
"""Read bone data from an acclaim skeleton file stream."""
bone_count = 0
lin = self.read_line(fid)
while lin[0] != ":":
parts = lin.split()
if parts[0] == "begin":
bone_count += 1
self.vertices.append(
vertex(
name="",
id=np.NaN,
meta={
"name": [],
"id": [],
"offset": [],
"orientation": [],
"axis": [0.0, 0.0, 0.0],
"axis_order": [],
"C": np.eye(3),
"Cinv": np.eye(3),
"channels": [],
"bodymass": [],
"confmass": [],
"order": [],
"rot_ind": [],
"pos_ind": [],
"limits": [],
"xyz": np.array([0.0, 0.0, 0.0]),
"rot": np.eye(3),
},
)
)
lin = self.read_line(fid)
elif parts[0] == "id":
self.vertices[bone_count].id = int(parts[1])
lin = self.read_line(fid)
self.vertices[bone_count].children = []
elif parts[0] == "name":
self.vertices[bone_count].name = parts[1]
lin = self.read_line(fid)
elif parts[0] == "direction":
direction = np.array(
[float(parts[1]), float(parts[2]), float(parts[3])]
)
lin = self.read_line(fid)
elif parts[0] == "length":
lgth = float(parts[1])
lin = self.read_line(fid)
elif parts[0] == "axis":
self.vertices[bone_count].meta["axis"] = np.array(
[float(parts[1]), float(parts[2]), float(parts[3])]
)
# order is reversed compared to bvh
self.vertices[bone_count].meta["axis_order"] = parts[-1][::-1].lower()
lin = self.read_line(fid)
elif parts[0] == "dof":
order = []
for i in range(1, len(parts)):
if parts[i] == "rx":
chan = "Xrotation"
order.append("x")
elif parts[i] == "ry":
chan = "Yrotation"
order.append("y")
elif parts[i] == "rz":
chan = "Zrotation"
order.append("z")
elif parts[i] == "tx":
chan = "Xposition"
elif parts[i] == "ty":
chan = "Yposition"
elif parts[i] == "tz":
chan = "Zposition"
elif parts[i] == "l":
chan = "length"
self.vertices[bone_count].meta["channels"].append(chan)
# order is reversed compared to bvh
self.vertices[bone_count].meta["order"] = order[::-1]
lin = self.read_line(fid)
elif parts[0] == "limits":
self.vertices[bone_count].meta["limits"] = [
[float(parts[1][1:]), float(parts[2][:-1])]
]
lin = self.read_line(fid)
while lin != "end":
parts = lin.split()
self.vertices[bone_count].meta["limits"].append(
[float(parts[0][1:]), float(parts[1][:-1])]
)
lin = self.read_line(fid)
self.vertices[bone_count].meta["limits"] = np.array(
self.vertices[bone_count].meta["limits"]
)
elif parts[0] == "end":
self.vertices[bone_count].meta["offset"] = direction * lgth
lin = self.read_line(fid)
return lin
def read_channels(self, fid):
"""Read channels from an acclaim file."""
bones = [[] for i in self.vertices]
num_channels = 0
for vertex in self.vertices:
num_channels = num_channels + len(vertex.meta["channels"])
lin = self.read_line(fid)
while lin != ":DEGREES":
lin = self.read_line(fid)
if lin == "":
raise ValueError("Could not find :DEGREES in " + fid.name)
counter = 0
lin = self.read_line(fid)
while lin:
parts = lin.split()
if len(parts) == 1:
frame_no = int(parts[0])
if frame_no:
counter += 1
if counter != frame_no:
raise ValueError("Unexpected frame number.")
else:
raise ValueError("Single bone name ...")
else:
ind = self.get_index_by_name(parts[0])
bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
lin = self.read_line(fid)
num_frames = counter
channels = np.zeros((num_frames, num_channels))
end_val = 0
for i in range(len(self.vertices)):
vertex = self.vertices[i]
if len(vertex.meta["channels"]) > 0:
start_val = end_val
end_val = end_val + len(vertex.meta["channels"])
for j in range(num_frames):
channels[j, start_val:end_val] = bones[i][j]
self.resolve_indices(i, start_val)
self.smooth_angle_channels(channels)
return channels
def read_documentation(self, fid):
"""Read documentation from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ":":
self.documentation.append(lin)
lin = self.read_line(fid)
return lin
def read_hierarchy(self, fid):
"""Read hierarchy information from acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin != "end":
parts = lin.split()
if lin != "begin":
ind = self.get_index_by_name(parts[0])
for i in range(1, len(parts)):
self.vertices[ind].children.append(self.get_index_by_name(parts[i]))
lin = self.read_line(fid)
lin = self.read_line(fid)
return lin
def read_line(self, fid):
"""Read a line from a file string and check it isn't either empty or commented before returning."""
lin = "#"
while lin[0] == "#":
lin = fid.readline().strip()
if lin == "":
return lin
return lin
def read_root(self, fid):
"""Read the root node from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ":":
parts = lin.split()
if parts[0] == "order":
order = []
for i in range(1, len(parts)):
if parts[i].lower() == "rx":
chan = "Xrotation"
order.append("x")
elif parts[i].lower() == "ry":
chan = "Yrotation"
order.append("y")
elif parts[i].lower() == "rz":
chan = "Zrotation"
order.append("z")
elif parts[i].lower() == "tx":
chan = "Xposition"
elif parts[i].lower() == "ty":
chan = "Yposition"
elif parts[i].lower() == "tz":
chan = "Zposition"
elif parts[i].lower() == "l":
chan = "length"
self.vertices[0].meta["channels"].append(chan)
# order is reversed compared to bvh
self.vertices[0].meta["order"] = order[::-1]
elif parts[0] == "axis":
# order is reversed compared to bvh
self.vertices[0].meta["axis_order"] = parts[1][::-1].lower()
elif parts[0] == "position":
self.vertices[0].meta["offset"] = [
float(parts[1]),
float(parts[2]),
float(parts[3]),
]
elif parts[0] == "orientation":
self.vertices[0].meta["orientation"] = [
float(parts[1]),
float(parts[2]),
float(parts[3]),
]
lin = self.read_line(fid)
return lin
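    # Illustrative ASF ":root" section of the kind read_root parses (values
    # made up; the ":root" line itself is consumed by read_skel):
    #
    #   :root
    #      order TX TY TZ RX RY RZ
    #      axis XYZ
    #      position 0.0 0.0 0.0
    #      orientation 0.0 0.0 0.0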
def read_skel(self, fid):
"""Loads an acclaim skeleton format from a file stream."""
lin = self.read_line(fid)
while lin:
if lin[0] == ":":
if lin[1:] == "name":
lin = self.read_line(fid)
self.name = lin
elif lin[1:] == "units":
lin = self.read_units(fid)
elif lin[1:] == "documentation":
lin = self.read_documentation(fid)
elif lin[1:] == "root":
lin = self.read_root(fid)
elif lin[1:] == "bonedata":
lin = self.read_bonedata(fid)
elif lin[1:] == "hierarchy":
lin = self.read_hierarchy(fid)
elif lin[1:8] == "version":
lin = self.read_line(fid)
continue
else:
if not lin:
self.finalize()
return
lin = self.read_line(fid)
else:
raise ValueError("Unrecognised file format")
self.finalize()
def read_units(self, fid):
"""Read units from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ":":
parts = lin.split()
if parts[0] == "mass":
self.mass = float(parts[1])
elif parts[0] == "length":
self.length = float(parts[1])
elif parts[0] == "angle":
self.angle = parts[1]
lin = self.read_line(fid)
return lin
def resolve_indices(self, index, start_val):
"""Get indices for the skeleton from the channels when loading in channel data."""
channels = self.vertices[index].meta["channels"]
base_channel = start_val
rot_ind = -np.ones(3, dtype=int)
pos_ind = -np.ones(3, dtype=int)
for i in range(len(channels)):
if channels[i] == "Xrotation":
rot_ind[0] = base_channel + i
elif channels[i] == "Yrotation":
rot_ind[1] = base_channel + i
elif channels[i] == "Zrotation":
rot_ind[2] = base_channel + i
elif channels[i] == "Xposition":
pos_ind[0] = base_channel + i
elif channels[i] == "Yposition":
pos_ind[1] = base_channel + i
elif channels[i] == "Zposition":
pos_ind[2] = base_channel + i
self.vertices[index].meta["rot_ind"] = list(rot_ind)
self.vertices[index].meta["pos_ind"] = list(pos_ind)
def set_rotation_matrices(self):
"""Set the meta information at each vertex to contain the correct matrices C and Cinv as prescribed by the rotations and rotation orders."""
for i in range(len(self.vertices)):
self.vertices[i].meta["C"] = rotation_matrix(
self.vertices[i].meta["axis"][0],
self.vertices[i].meta["axis"][1],
self.vertices[i].meta["axis"][2],
self.vertices[i].meta["axis_order"],
degrees=True,
)
# Todo: invert this by applying angle operations in reverse order
self.vertices[i].meta["Cinv"] = np.linalg.inv(self.vertices[i].meta["C"])
# Utilities for loading in x,y,z data.
def load_text_data(dataset, directory, centre=True):
"""Load in a data set of marker points from the Ohio State University C3D motion capture files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
points, point_names = parse_text(os.path.join(directory, dataset + ".txt"))[0:2]
# Remove markers where there is a NaN
present_index = [
i
for i in range(points[0].shape[1])
if not (
np.any(np.isnan(points[0][:, i]))
            or np.any(np.isnan(points[1][:, i]))
            or np.any(np.isnan(points[2][:, i]))
)
]
point_names = point_names[present_index]
for i in range(3):
points[i] = points[i][:, present_index]
if centre:
points[i] = (points[i].T - points[i].mean(axis=1)).T
    # Concatenate the X, Y and Z markers together
Y = np.concatenate((points[0], points[1], points[2]), axis=1)
Y = Y / 400.0
connect = read_connections(os.path.join(directory, "connections.txt"), point_names)
return Y, connect
def parse_text(file_name):
"""Parse data from Ohio State University text mocap files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
# Read the header
fid = open(file_name, "r")
point_names = np.array(fid.readline().split())[2:-1:3]
fid.close()
for i in range(len(point_names)):
point_names[i] = point_names[i][0:-2]
# Read the matrix data
S = np.loadtxt(file_name, skiprows=1)
field = np.uint(S[:, 0])
times = S[:, 1]
S = S[:, 2:]
# Set the -9999.99 markers to be not present
S[S == -9999.99] = np.NaN
# Store x, y and z in different arrays
points = []
points.append(S[:, 0:-1:3])
points.append(S[:, 1:-1:3])
points.append(S[:, 2:-1:3])
return points, point_names, times
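# Illustrative layout of the OSU text files parse_text expects (made-up data):
# a header row of "Field Time" followed by name:X name:Y name:Z triplets, then
# one row of values per frame, e.g.
#
#   Field Time lclav:X lclav:Y lclav:Z rclav:X rclav:Y rclav:Z ...
#   1     0.00 12.3    45.6    7.8     ...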
def read_connections(file_name, point_names):
"""Read a file detailing which markers should be connected to which for motion capture data."""
connections = []
fid = open(file_name, "r")
line = fid.readline()
while line:
connections.append(np.array(line.split(",")))
connections[-1][0] = connections[-1][0].strip()
connections[-1][1] = connections[-1][1].strip()
line = fid.readline()
connect = np.zeros((len(point_names), len(point_names)), dtype=bool)
for i in range(len(point_names)):
for j in range(len(point_names)):
for k in range(len(connections)):
if (
connections[k][0] == point_names[i]
and connections[k][1] == point_names[j]
):
connect[i, j] = True
connect[j, i] = True
break
return connect
skel = acclaim_skeleton()
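# Hedged usage sketch (file names are placeholders; the channel loader whose
# tail appears at the top of this class is assumed to be named load_channels):
#
#   skel = acclaim_skeleton()
#   skel.load_skel('subject.asf')              # parse the skeleton hierarchy
#   channels = skel.load_channels('clip.amc')  # (num_frames, num_channels)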
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 12:33:23 2015
@author: nouamanelaanait
"""
import warnings, os
import numpy as np
import h5py
import multiprocess as mp
from skimage.morphology import dilation, disk
from scipy.ndimage import gaussian_filter
import cv2
class Corrector(object):
    ''' An object that holds a data set and provides methods for image correction,
    such as background subtraction, normalization, denoising, etc.
    Essentially, methods that manipulate the intensity values pixel by pixel.
    '''
def __init__(self):
self.data = []
self.proc_data = []
def loadData(self, dataset):
        ''' This is a Method that loads an h5 Dataset to be corrected.
input: h5 dataset
'''
if not isinstance(dataset, h5py.Dataset):
warnings.warn( 'Error: Data must be an h5 Dataset object' )
else:
self.data = dataset
def loadprocData(self, dataset):
''' This is a Method that loads processed data to be corrected.
input: h5 dataset or numpy ndarray
'''
self.proc_data = dataset
def clearData(self):
''' This is a Method to clear the data from the object.
'''
del self.data
self.data = []
def getData(self):
        ''' This is a Method that returns the loaded h5 Dataset.
output: h5 dataset
'''
return self.data
def bkgSubtract(self, impRead, impDark, impFlat = None):
''' This is a Method to correct image for READ, DARK noise of a ccd camera.
Also normalizes with respect to FlatField if provided.
Input:
impRead: np.ndarray
impDark: np.ndarray
impFlat = None, flatfield Image
The size of the above images must match the raw image.
'''
dset = self.data
att = dset.attrs
exposure = att['seconds']
impList = []
if impFlat is None:
try:
for t, raw in zip(exposure, dset):
imp = raw - impRead - impDark*t
imp[imp<0] = 1
impList.append(imp)
corrstack = np.dstack([impList])
self.proc_data = corrstack
except ValueError:
warnings.warn('Error: Correction Files might not have the same size as the Image.')
else:
try:
for t, raw in zip(exposure, dset):
imp = (raw - impRead - impDark*t)/impFlat
imp[imp<0] = 1
impList.append(imp)
corrstack = np.dstack([impList])
self.proc_data = corrstack
except ValueError:
warnings.warn('Error: Correction Files might not have the same size as the Image.')
# self.proc_data = corrstack
return corrstack
def normalize(self, monitor = None, trans = 'Trans', time = 'seconds', calibration = 1.0, use_processed = False):
''' Normalize the counts in the image by filters, exposure/or as monitor (e.g ion chamber)
and converts counts to photons if calibration is provided.
Input:
calibration = float.
monitor = string for the monitor attribute.
use_processed = bool, if True uses the latest corrected data.
'''
if use_processed:
data = self.proc_data
else:
data = self.data
dset = self.data
att = dset.attrs
exposure = att[time]
transmission = att[trans]
impList = []
if monitor is None:
try:
for raw,t,trans in zip(data, exposure, transmission):
imp = calibration * raw / trans /t
impList.append(imp)
normstack = np.dstack([impList])
self.proc_data = normstack
except ValueError:
                warnings.warn("Error: Couldn't broadcast seconds, trans and dset together")
else:
mon = att[monitor]
try:
for raw,trans,m in zip(data,transmission,mon):
imp = calibration * raw / trans / m
impList.append(imp)
normstack = np.dstack([impList])
self.proc_data = normstack
except ValueError:
                warnings.warn("Error: Couldn't broadcast trans, monitor, and dset arrays together.")
return normstack
def flatField(self, processes , sigmaMorph = 100, radiusMorph = 25, winLP = 7, sigmaBlur = 100,
use_processed = False, method = 'morphology'):
''' Find illumination function of the data and divide out the image for flat-field correction.
Input:
processes = int, number of processors to use.
sigmaMorph = float, sigma of gaussian filter.
radiusMorph = int, radius of structuring element.
            winLP = int, diameter of the low-spatial-frequency window to cut out.
            sigmaBlur = float, currently unused.
use_processed = bool, if True uses the latest corrected data.
method = 'morphology': A combination of morphological Filter (Dilation) and Gaussian (Blur) filter.
method = 'lowpass filter': gaussian lowpass filter applied directly to image.
Output:
stack of flat-field corrected images.
'''
if use_processed:
data = self.proc_data
else:
data = self.data
def __flattenbyMorpho(imp):
dilated = cv2.dilate(imp, disk(radiusMorph))
#dilated = dilation(np.log10(imp), selem= disk(radius))
illum= gaussian_filter(dilated, sigmaMorph)
proc = imp*1.0/illum
return proc
def __flattenbyLPF(imp):
#Fourier transform
FT = np.fft.fft2(imp)
FT_shift = np.fft.fftshift(FT)
#Cut-out low frequency in FT by gaussian LP filter
row, col = imp.shape
            cen_row, cen_col = row // 2, col // 2
            arr = np.ones(imp.shape)
            arr[cen_row - winLP // 2:cen_row + winLP // 2, cen_col - winLP // 2:cen_col + winLP // 2] = 0
sigma = winLP
LPfilt = gaussian_filter(arr, sigma/2, truncate = 5.)
LPfilt[LPfilt < 0.9999] = 0.
LPfilt = gaussian_filter(LPfilt, sigma, truncate = 5.)
LPfilt[cen_row,cen_col]=1
# Apply LP Filter and Inverse Fourier transform
FT_ishift = np.fft.ifftshift(FT_shift*LPfilt)
iFT = np.fft.ifft2(FT_ishift)
proc = np.abs(iFT)
return proc
# start pool of workers
print('launching %i kernels...'%(processes))
pool = mp.Pool(processes)
tasks = [(imp) for imp in data]
chunk = int(data.shape[0]/processes)
if method == 'morphology':
jobs = pool.imap(__flattenbyMorpho, tasks, chunksize = chunk)
elif method == 'lowpass filter':
jobs = pool.imap(__flattenbyLPF, tasks, chunksize = chunk)
# get images from different processes
results =[]
print('Extracting Flattened Images...')
try:
for j in jobs:
results.append(j)
except ValueError:
warnings.warn('Error: There appears to be a problem with the processing')
# pack all images into 3d array
flatstack = np.array([imp for imp in results])
# close the pool
print('Closing down the kernels... \n')
pool.close()
# self.proc_data = flatstack
return flatstack
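# Hedged usage sketch for Corrector (h5file, impRead and impDark are
# placeholders; assumes the loaded h5 Dataset carries 'seconds' and 'Trans'
# attributes as the methods above expect):
#
#   corr = Corrector()
#   corr.loadData(h5file['frames'])
#   corr.bkgSubtract(impRead, impDark)                  # read/dark correction
#   corr.normalize(calibration=1.0, use_processed=True) # per-frame scaling
#   flat = corr.flatField(processes=4, method='morphology', use_processed=True)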
|
|
# (c) 2012-2013, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import errors
from ansible import utils
import os
import ansible.utils.template as template
class Task(object):
__slots__ = [
'name', 'meta', 'action', 'only_if', 'when', 'async_seconds', 'async_poll_interval',
'notify', 'module_name', 'module_args', 'module_vars', 'default_vars',
'play', 'notified_by', 'tags', 'register',
'delegate_to', 'first_available_file', 'ignore_errors',
'local_action', 'transport', 'sudo', 'sudo_user', 'sudo_pass',
'items_lookup_plugin', 'items_lookup_terms', 'environment', 'args',
'any_errors_fatal', 'changed_when', 'always_run'
]
# to prevent typos and such
VALID_KEYS = [
'name', 'meta', 'action', 'only_if', 'async', 'poll', 'notify',
'first_available_file', 'include', 'tags', 'register', 'ignore_errors',
'delegate_to', 'local_action', 'transport', 'sudo', 'sudo_user',
'sudo_pass', 'when', 'connection', 'environment', 'args',
'any_errors_fatal', 'changed_when', 'always_run'
]
def __init__(self, play, ds, module_vars=None, default_vars=None, additional_conditions=None):
''' constructor loads from a task or handler datastructure '''
# meta directives are used to tell things like ansible/playbook to run
# operations like handler execution. Meta tasks are not executed
# normally.
if 'meta' in ds:
self.meta = ds['meta']
self.tags = []
return
else:
self.meta = None
library = os.path.join(play.basedir, 'library')
if os.path.exists(library):
utils.plugins.module_finder.add_directory(library)
for x in ds.keys():
# code to allow for saying "modulename: args" versus "action: modulename args"
if x in utils.plugins.module_finder:
if 'action' in ds:
raise errors.AnsibleError("multiple actions specified in task %s" % (ds.get('name', ds['action'])))
if isinstance(ds[x], dict):
if 'args' in ds:
raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
ds['args'] = ds[x]
ds[x] = ''
elif ds[x] is None:
ds[x] = ''
if not isinstance(ds[x], basestring):
raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
ds['action'] = x + " " + ds[x]
ds.pop(x)
# code to allow "with_glob" and to reference a lookup plugin named glob
elif x.startswith("with_"):
plugin_name = x.replace("with_","")
if plugin_name in utils.plugins.lookup_loader:
ds['items_lookup_plugin'] = plugin_name
ds['items_lookup_terms'] = ds[x]
ds.pop(x)
else:
raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
elif x in [ 'changed_when', 'when']:
ds[x] = "jinja2_compare %s" % (ds[x])
elif x.startswith("when_"):
if 'when' in ds:
raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
when_name = x.replace("when_","")
ds['when'] = "%s %s" % (when_name, ds[x])
ds.pop(x)
elif not x in Task.VALID_KEYS:
raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
self.module_vars = module_vars
self.default_vars = default_vars
self.play = play
# load various attributes
self.name = ds.get('name', None)
self.tags = [ 'all' ]
self.register = ds.get('register', None)
self.sudo = utils.boolean(ds.get('sudo', play.sudo))
self.environment = ds.get('environment', {})
# rather than simple key=value args on the options line, these represent structured data and the values
# can be hashes and lists, not just scalars
self.args = ds.get('args', {})
if self.sudo:
self.sudo_user = ds.get('sudo_user', play.sudo_user)
self.sudo_pass = ds.get('sudo_pass', play.playbook.sudo_pass)
else:
self.sudo_user = None
self.sudo_pass = None
# Both are defined
if ('action' in ds) and ('local_action' in ds):
raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
# Both are NOT defined
elif (not 'action' in ds) and (not 'local_action' in ds):
raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '<Unnamed>'))
# Only one of them is defined
elif 'local_action' in ds:
self.action = ds.get('local_action', '')
self.delegate_to = '127.0.0.1'
else:
self.action = ds.get('action', '')
self.delegate_to = ds.get('delegate_to', None)
self.transport = ds.get('connection', ds.get('transport', play.transport))
if isinstance(self.action, dict):
if 'module' not in self.action:
raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
if self.args:
raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
self.args = self.action
self.action = self.args.pop('module')
# delegate_to can use variables
if not (self.delegate_to is None):
# delegate_to: localhost should use local transport
if self.delegate_to in ['127.0.0.1', 'localhost']:
self.transport = 'local'
# notified by is used by Playbook code to flag which hosts
# need to run a notifier
self.notified_by = []
# if no name is specified, use the action line as the name
if self.name is None:
self.name = self.action
# load various attributes
self.only_if = ds.get('only_if', 'True')
self.when = ds.get('when', None)
self.changed_when = ds.get('changed_when', None)
if self.changed_when is not None:
self.changed_when = utils.compile_when_to_only_if(self.changed_when)
self.async_seconds = int(ds.get('async', 0)) # not async by default
self.async_poll_interval = int(ds.get('poll', 10)) # default poll = 10 seconds
self.notify = ds.get('notify', [])
self.first_available_file = ds.get('first_available_file', None)
self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
self.items_lookup_terms = ds.get('items_lookup_terms', None)
self.ignore_errors = ds.get('ignore_errors', False)
self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
self.always_run = ds.get('always_run', False)
# action should be a string
if not isinstance(self.action, basestring):
raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
# notify can be a string or a list, store as a list
if isinstance(self.notify, basestring):
self.notify = [ self.notify ]
# split the action line into a module name + arguments
tokens = self.action.split(None, 1)
if len(tokens) < 1:
raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
self.module_name = tokens[0]
self.module_args = ''
if len(tokens) > 1:
self.module_args = tokens[1]
import_tags = self.module_vars.get('tags',[])
if type(import_tags) in [str,unicode]:
# allow the user to list comma delimited tags
import_tags = import_tags.split(",")
# handle mutually incompatible options
incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
if len(incompatibles) > 1:
raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
        # make first_available_file accessible to Runner code
if self.first_available_file:
self.module_vars['first_available_file'] = self.first_available_file
if self.items_lookup_plugin is not None:
self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
self.module_vars['items_lookup_terms'] = self.items_lookup_terms
# allow runner to see delegate_to option
self.module_vars['delegate_to'] = self.delegate_to
# make some task attributes accessible to Runner code
self.module_vars['ignore_errors'] = self.ignore_errors
self.module_vars['register'] = self.register
self.module_vars['changed_when'] = self.changed_when
self.module_vars['always_run'] = self.always_run
# tags allow certain parts of a playbook to be run without running the whole playbook
apply_tags = ds.get('tags', None)
if apply_tags is not None:
if type(apply_tags) in [ str, unicode ]:
self.tags.append(apply_tags)
elif type(apply_tags) == list:
self.tags.extend(apply_tags)
self.tags.extend(import_tags)
if self.when is not None:
if self.only_if != 'True':
raise errors.AnsibleError('when obsoletes only_if, only use one or the other')
self.only_if = utils.compile_when_to_only_if(self.when)
if additional_conditions:
self.only_if = [ self.only_if ]
self.only_if.extend(additional_conditions)
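# Hedged illustration of the datastructure normalization above (values made
# up): a task written with the module shorthand
#
#     ds = {'name': 'install apache', 'yum': 'name=httpd state=present',
#           'notify': 'restart httpd', 'tags': 'web'}
#
# is rewritten to ds['action'] = 'yum name=httpd state=present', after which
# the action line is split into module_name ('yum') and module_args
# ('name=httpd state=present').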
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
This module implements a class for handling URLs.
"""
import urllib
import cgi
from paste import request
# Imported lazily from FormEncode:
variabledecode = None
__all__ = ["URL", "Image"]
def html_quote(v):
if v is None:
return ''
return cgi.escape(str(v), 1)
def url_quote(v):
if v is None:
return ''
return urllib.quote(str(v))
url_unquote = urllib.unquote
def js_repr(v):
if v is None:
return 'null'
elif v is False:
return 'false'
elif v is True:
return 'true'
elif isinstance(v, list):
return '[%s]' % ', '.join(map(js_repr, v))
elif isinstance(v, dict):
return '{%s}' % ', '.join(
['%s: %s' % (js_repr(key), js_repr(value))
             for key, value in v.items()])
elif isinstance(v, str):
return repr(v)
elif isinstance(v, unicode):
# @@: how do you do Unicode literals in Javascript?
return repr(v.encode('UTF-8'))
elif isinstance(v, (float, int)):
return repr(v)
elif isinstance(v, long):
return repr(v).lstrip('L')
elif hasattr(v, '__js_repr__'):
return v.__js_repr__()
else:
raise ValueError(
"I don't know how to turn %r into a Javascript representation"
% v)
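# For example (illustrative values):
#   js_repr(None)           -> 'null'
#   js_repr([1, True, 'a']) -> "[1, true, 'a']"
#   js_repr({'n': 1})       -> "{'n': 1}"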
class URLResource(object):
"""
This is an abstract superclass for different kinds of URLs
"""
default_params = {}
def __init__(self, url, vars=None, attrs=None,
params=None):
self.url = url or '/'
self.vars = vars or []
self.attrs = attrs or {}
self.params = self.default_params.copy()
self.original_params = params or {}
if params:
self.params.update(params)
#@classmethod
def from_environ(cls, environ, with_query_string=True,
with_path_info=True, script_name=None,
path_info=None, querystring=None):
url = request.construct_url(
environ, with_query_string=False,
with_path_info=with_path_info, script_name=script_name,
path_info=path_info)
if with_query_string:
if querystring is None:
vars = request.parse_querystring(environ)
else:
vars = cgi.parse_qsl(
querystring,
keep_blank_values=True,
strict_parsing=False)
else:
vars = None
v = cls(url, vars=vars)
return v
from_environ = classmethod(from_environ)
def __call__(self, *args, **kw):
res = self._add_positional(args)
res = res._add_vars(kw)
return res
def __getitem__(self, item):
if '=' in item:
name, value = item.split('=', 1)
return self._add_vars({url_unquote(name): url_unquote(value)})
return self._add_positional((item,))
def attr(self, **kw):
for key in kw.keys():
if key.endswith('_'):
kw[key[:-1]] = kw[key]
del kw[key]
new_attrs = self.attrs.copy()
new_attrs.update(kw)
return self.__class__(self.url, vars=self.vars,
attrs=new_attrs,
params=self.original_params)
def param(self, **kw):
new_params = self.original_params.copy()
new_params.update(kw)
return self.__class__(self.url, vars=self.vars,
attrs=self.attrs,
params=new_params)
def coerce_vars(self, vars):
global variabledecode
need_variable_encode = False
for key, value in vars.items():
if isinstance(value, dict):
need_variable_encode = True
if key.endswith('_'):
vars[key[:-1]] = vars[key]
del vars[key]
if need_variable_encode:
if variabledecode is None:
from formencode import variabledecode
vars = variabledecode.variable_encode(vars)
return vars
def var(self, **kw):
kw = self.coerce_vars(kw)
new_vars = self.vars + kw.items()
return self.__class__(self.url, vars=new_vars,
attrs=self.attrs,
params=self.original_params)
def setvar(self, **kw):
"""
Like ``.var(...)``, except overwrites keys, where .var simply
extends the keys. Setting a variable to None here will
effectively delete it.
"""
kw = self.coerce_vars(kw)
new_vars = []
for name, values in self.vars:
if name in kw:
continue
new_vars.append((name, values))
new_vars.extend(kw.items())
return self.__class__(self.url, vars=new_vars,
attrs=self.attrs,
params=self.original_params)
def setvars(self, **kw):
"""
Creates a copy of this URL, but with all the variables set/reset
(like .setvar(), except clears past variables at the same time)
"""
return self.__class__(self.url, vars=kw.items(),
attrs=self.attrs,
params=self.original_params)
def addpath(self, *paths):
u = self
for path in paths:
path = str(path).lstrip('/')
new_url = u.url
if not new_url.endswith('/'):
new_url += '/'
u = u.__class__(new_url+path, vars=u.vars,
attrs=u.attrs,
params=u.original_params)
return u
__div__ = addpath
def become(self, OtherClass):
return OtherClass(self.url, vars=self.vars,
attrs=self.attrs,
params=self.original_params)
def href__get(self):
s = self.url
if self.vars:
s += '?'
vars = []
for name, val in self.vars:
if isinstance(val, (list, tuple)):
val = [v for v in val if v is not None]
elif val is None:
continue
vars.append((name, val))
s += urllib.urlencode(vars, True)
return s
href = property(href__get)
def __repr__(self):
base = '<%s %s' % (self.__class__.__name__,
self.href or "''")
if self.attrs:
base += ' attrs(%s)' % (
' '.join(['%s="%s"' % (html_quote(n), html_quote(v))
for n, v in self.attrs.items()]))
if self.original_params:
base += ' params(%s)' % (
', '.join(['%s=%r' % (n, v)
                           for n, v in self.original_params.items()]))
return base + '>'
def html__get(self):
if not self.params.get('tag'):
raise ValueError(
"You cannot get the HTML of %r until you set the "
"'tag' param'" % self)
content = self._get_content()
tag = '<%s' % self.params.get('tag')
attrs = ' '.join([
'%s="%s"' % (html_quote(n), html_quote(v))
for n, v in self._html_attrs()])
if attrs:
tag += ' ' + attrs
tag += self._html_extra()
if content is None:
return tag + ' />'
else:
return '%s>%s</%s>' % (tag, content, self.params.get('tag'))
html = property(html__get)
def _html_attrs(self):
return self.attrs.items()
def _html_extra(self):
return ''
def _get_content(self):
"""
Return the content for a tag (for self.html); return None
for an empty tag (like ``<img />``)
"""
raise NotImplementedError
def _add_vars(self, vars):
raise NotImplementedError
def _add_positional(self, args):
raise NotImplementedError
class URL(URLResource):
r"""
>>> u = URL('http://localhost')
>>> u
<URL http://localhost>
>>> u = u['view']
>>> str(u)
'http://localhost/view'
>>> u['//foo'].param(content='view').html
'<a href="http://localhost/view/foo">view</a>'
>>> u.param(confirm='Really?', content='goto').html
'<a href="http://localhost/view" onclick="return confirm(\'Really?\')">goto</a>'
>>> u(title='See "it"', content='goto').html
'<a href="http://localhost/view?title=See+%22it%22">goto</a>'
>>> u('another', var='fuggetaboutit', content='goto').html
'<a href="http://localhost/view/another?var=fuggetaboutit">goto</a>'
>>> u.attr(content='goto').html
Traceback (most recent call last):
....
    ValueError: You must give a content param to <URL http://localhost/view attrs(content="goto")> to generate anchor tags
>>> str(u['foo=bar%20stuff'])
'http://localhost/view?foo=bar+stuff'
"""
default_params = {'tag': 'a'}
def __str__(self):
return self.href
def _get_content(self):
if not self.params.get('content'):
raise ValueError(
"You must give a content param to %r generate anchor tags"
% self)
return self.params['content']
def _add_vars(self, vars):
url = self
for name in ('confirm', 'content'):
if name in vars:
url = url.param(**{name: vars.pop(name)})
if 'target' in vars:
url = url.attr(target=vars.pop('target'))
return url.var(**vars)
def _add_positional(self, args):
return self.addpath(*args)
def _html_attrs(self):
attrs = self.attrs.items()
attrs.insert(0, ('href', self.href))
if self.params.get('confirm'):
attrs.append(('onclick', 'return confirm(%s)'
% js_repr(self.params['confirm'])))
return attrs
def onclick_goto__get(self):
return 'location.href=%s; return false' % js_repr(self.href)
onclick_goto = property(onclick_goto__get)
def button__get(self):
return self.become(Button)
button = property(button__get)
def js_popup__get(self):
return self.become(JSPopup)
js_popup = property(js_popup__get)
class Image(URLResource):
r"""
>>> i = Image('/images')
>>> i = i / '/foo.png'
>>> i.html
'<img src="/images/foo.png" />'
>>> str(i['alt=foo'])
'<img src="/images/foo.png" alt="foo" />'
>>> i.href
'/images/foo.png'
"""
default_params = {'tag': 'img'}
def __str__(self):
return self.html
def _get_content(self):
return None
def _add_vars(self, vars):
return self.attr(**vars)
def _add_positional(self, args):
return self.addpath(*args)
def _html_attrs(self):
attrs = self.attrs.items()
attrs.insert(0, ('src', self.href))
return attrs
class Button(URLResource):
r"""
>>> u = URL('/')
>>> u = u / 'delete'
>>> b = u.button['confirm=Sure?'](id=5, content='del')
>>> str(b)
'<button onclick="if (confirm(\'Sure?\')) {location.href=\'/delete?id=5\'}; return false">del</button>'
"""
default_params = {'tag': 'button'}
def __str__(self):
return self.html
def _get_content(self):
if self.params.get('content'):
return self.params['content']
if self.attrs.get('value'):
            return self.attrs['value']
# @@: Error?
return None
def _add_vars(self, vars):
button = self
if 'confirm' in vars:
button = button.param(confirm=vars.pop('confirm'))
if 'content' in vars:
button = button.param(content=vars.pop('content'))
return button.var(**vars)
def _add_positional(self, args):
return self.addpath(*args)
def _html_attrs(self):
attrs = self.attrs.items()
onclick = 'location.href=%s' % js_repr(self.href)
if self.params.get('confirm'):
onclick = 'if (confirm(%s)) {%s}' % (
js_repr(self.params['confirm']), onclick)
onclick += '; return false'
attrs.insert(0, ('onclick', onclick))
return attrs
class JSPopup(URLResource):
r"""
>>> u = URL('/')
>>> u = u / 'view'
>>> j = u.js_popup(content='view')
>>> j.html
'<a href="/view" onclick="window.open(\'/view\', \'_blank\'); return false" target="_blank">view</a>'
"""
default_params = {'tag': 'a', 'target': '_blank'}
def _add_vars(self, vars):
button = self
for var in ('width', 'height', 'stripped', 'content'):
if var in vars:
button = button.param(**{var: vars.pop(var)})
return button.var(**vars)
def _window_args(self):
p = self.params
features = []
if p.get('stripped'):
p['location'] = p['status'] = p['toolbar'] = '0'
for param in 'channelmode directories fullscreen location menubar resizable scrollbars status titlebar'.split():
if param not in p:
continue
v = p[param]
if v not in ('yes', 'no', '1', '0'):
if v:
v = '1'
else:
v = '0'
features.append('%s=%s' % (param, v))
        for param in 'height left top width'.split():
if not p.get(param):
continue
features.append('%s=%s' % (param, p[param]))
args = [self.href, p['target']]
if features:
args.append(','.join(features))
return ', '.join(map(js_repr, args))
def _html_attrs(self):
attrs = self.attrs.items()
onclick = ('window.open(%s); return false'
% self._window_args())
attrs.insert(0, ('target', self.params['target']))
attrs.insert(0, ('onclick', onclick))
attrs.insert(0, ('href', self.href))
return attrs
def _get_content(self):
if not self.params.get('content'):
raise ValueError(
"You must give a content param to %r generate anchor tags"
% self)
return self.params['content']
def _add_positional(self, args):
return self.addpath(*args)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
import chainer
from chainer import configuration
from chainer import functions
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable
class BatchNormalization(link.Link):
"""Batch normalization layer on outputs of linear or convolution functions.
This link wraps the :func:`~chainer.functions.batch_normalization` and
:func:`~chainer.functions.fixed_batch_normalization` functions.
It runs in three modes: training mode, fine-tuning mode, and testing mode.
In training mode, it normalizes the input by *batch statistics*. It also
maintains approximated population statistics by moving averages, which can
be used for instant evaluation in testing mode. Training mode is enabled
when ``chainer.config.train`` is set to ``True`` and :meth:`__call__`
is invoked with ``finetune=False`` (the default is False).
In fine-tuning mode, it accumulates the input to compute *population
statistics*. In order to correctly compute the population statistics, a
    user must use this mode to feed mini-batches covering the whole training
    dataset. Fine-tuning mode is enabled when ``chainer.config.train`` is set to
``True`` and :meth:`__call__` is invoked with ``finetune=True``.
In testing mode, it uses pre-computed population statistics to normalize
    the input variable. The population statistics are approximate if they were
    computed in training mode, or accurate if they were correctly computed in
fine-tuning mode. Testing mode is enabled when ``chainer.config.train``
is set to ``False``.
Args:
size (int, tuple of ints, or None): Size (or shape) of channel
dimensions. If ``None``, the size will be determined from
dimension(s) of the input batch during the first forward pass.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability.
dtype (numpy.dtype): Type to use in computing.
        use_gamma (bool): If ``True``, use a scaling parameter. Otherwise,
            a fixed unit scale (1) is used, which has no effect.
        use_beta (bool): If ``True``, use a shifting parameter. Otherwise,
            a fixed zero shift (0) is used, which has no effect.
axis (int or tuple of int): Axis over which normalization is
performed. When axis is ``None``, it is determined from input
dimensions. For example, if ``x.ndim`` is 4, axis becomes (0, 2, 3)
and normalization is performed over 0th, 2nd and 3rd axis of input.
If it is 2, axis becomes (0) and normalization is performed
over 0th axis of input. When a tuple of int is given to this
            option, numbers in the tuple must be sorted in ascending
order. For example, (0, 2) is OK, but (2, 0) is not.
initial_gamma: Initializer of the scaling parameter. The default value
is ``1``.
initial_beta: Initializer of the shifting parameter. The default value
is ``0``.
initial_avg_mean: Initializer of the moving average of population mean.
The default value is ``0``.
initial_avg_var: Initializer of the moving average of population
variance. The default value is ``1``.
.. note::
From v5.0.0, the initial value of the population variance is changed to
1. It does not change the behavior of training, but the resulting model
may have a slightly different behavior on inference. To emulate the
old behavior, pass ``initial_avg_var=0`` for training.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
.. seealso::
:func:`~chainer.functions.batch_normalization`,
:func:`~chainer.functions.fixed_batch_normalization`
Attributes:
gamma (~chainer.Variable): Scaling parameter.
beta (~chainer.Variable): Shifting parameter.
avg_mean (numpy.ndarray or cupy.ndarray): Population mean.
avg_var (numpy.ndarray or cupy.ndarray): Population variance.
N (int): Count of batches given for fine-tuning.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability. This value is added
to the batch variances.
.. admonition:: Example
>>> x = np.arange(12).reshape(4, 3).astype(np.float32) ** 2
>>> x
array([[ 0., 1., 4.],
[ 9., 16., 25.],
[ 36., 49., 64.],
[ 81., 100., 121.]], dtype=float32)
>>> bn = chainer.links.BatchNormalization(3)
>>> bn(x)
variable([[-1. , -1.0664359 , -1.1117983 ],
[-0.71428573, -0.6714596 , -0.6401263 ],
[ 0.14285715, 0.19748813, 0.23583598],
[ 1.5714287 , 1.5404074 , 1.5160885 ]])
>>> (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 2e-5)
array([[-1. , -1.0664359 , -1.1117983 ],
[-0.71428573, -0.6714596 , -0.6401263 ],
[ 0.14285715, 0.19748813, 0.235836 ],
[ 1.5714285 , 1.5404074 , 1.5160886 ]], dtype=float32)
There are several ways to make a BatchNormalization link.
Consider an input of batched 10 images of 32x32 with 3 channels.
>>> x = np.random.randn(10, 3, 32, 32).astype(np.float32)
1. Give the parameter size:
To normalize for each channel, give the number of channels
to ``size``.
>>> bn = chainer.links.BatchNormalization(3)
>>> bn.avg_mean.shape
(3,)
>>> bn.beta += 2.0
>>> bn.gamma *= 5.0
>>> list(sorted(bn.namedparams())) # doctest: +ELLIPSIS
[('/beta', variable([2., ...])), ('/gamma', variable([5., ...]))]
>>> y = bn(x)
>>> y.shape
(10, 3, 32, 32)
>>> np.testing.assert_allclose(
... y.array.mean(axis=(0, 2, 3)), bn.beta.array, atol=1e-6)
>>> np.testing.assert_allclose(
... y.array.std(axis=(0, 2, 3)),
... bn.gamma.array, atol=1e-3)
To normalize for each channel for each pixel, ``size`` should
be the tuple of the dimensions.
>>> bn = chainer.links.BatchNormalization((3, 32, 32))
>>> bn.avg_mean.shape
(3, 32, 32)
>>> y = bn(x)
>>> y.shape
(10, 3, 32, 32)
>>> np.testing.assert_allclose(
... y.array.mean(axis=0), bn.beta.array, atol=1e-6)
>>> np.testing.assert_allclose(
... y.array.std(axis=0),
... bn.gamma.array, atol=1e-3)
By default, channel axis is (or starts from) the 1st axis of the
input shape.
2. Give the aggregate axes:
from Chainer v5
With ``axis`` option, similarly to NumPy, you may specify the
aggregate axes, which are treated as the "batch" axes for the
batch statistics.
You can omit ``size`` if ``axis`` is given. In this case, creation
of persistent values ``avg_mean``, ``avg_var`` and parameters
``beta``, ``gamma`` is deferred until first forward propagation.
        The examples in 1. correspond to the following, respectively.
>>> bn = chainer.links.BatchNormalization(axis=(0, 2, 3))
>>> print(bn.avg_mean)
None
>>> y = bn(x)
>>> bn.avg_mean.shape
(3,)
>>> bn = chainer.links.BatchNormalization(axis=0)
>>> print(bn.avg_mean)
None
>>> y = bn(x)
>>> bn.avg_mean.shape
(3, 32, 32)
"""
gamma = None
beta = None
avg_mean = None
avg_var = None
def __init__(self, size=None, decay=0.9, eps=2e-5, dtype=None,
use_gamma=True, use_beta=True,
initial_gamma=None, initial_beta=None, axis=None,
initial_avg_mean=None, initial_avg_var=None):
super(BatchNormalization, self).__init__()
if size is None and axis is None:
raise RuntimeError('size or axis is required')
self._initial_avg_mean = initial_avg_mean
self._initial_avg_var = initial_avg_var
self.N = 0
self.register_persistent('N')
self.decay = decay
self.eps = eps
if isinstance(axis, int):
axis = (axis,)
self.axis = axis
self._dtype = chainer.get_dtype(dtype)
with self.init_scope():
if use_gamma:
if initial_gamma is None:
initial_gamma = 1
gamma_initializer = \
initializers._get_initializer(initial_gamma)
gamma_initializer.dtype = self._dtype
self.gamma = variable.Parameter(gamma_initializer)
if use_beta:
if initial_beta is None:
initial_beta = 0
beta_initializer = initializers._get_initializer(initial_beta)
beta_initializer.dtype = self._dtype
self.beta = variable.Parameter(beta_initializer)
if size is not None:
self._initialize_params(size)
def _initialize_params(self, shape):
self.avg_mean = self._init_array(self._initial_avg_mean, 0, shape)
self._initial_avg_mean = None
self.register_persistent('avg_mean')
self.avg_var = self._init_array(self._initial_avg_var, 1, shape)
self._initial_avg_var = None
self.register_persistent('avg_var')
if self.gamma is not None:
self.gamma.initialize(shape)
if self.beta is not None:
self.beta.initialize(shape)
def _init_array(self, initializer, default_value, size):
if initializer is None:
initializer = default_value
initializer = initializers._get_initializer(initializer)
return initializers.generate_array(
initializer, size, self.xp, dtype=self._dtype)
def forward(self, x, **kwargs):
"""forward(self, x, finetune=False)
Invokes the forward propagation of BatchNormalization.
In training mode, the BatchNormalization computes moving averages of
mean and variance for evaluation during training, and normalizes the
input using batch statistics.
.. warning::
``test`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', False)``.
See :func:`chainer.using_config`.
Args:
x (Variable): Input variable.
finetune (bool): If it is in the training mode and ``finetune`` is
``True``, BatchNormalization runs in fine-tuning mode; it
accumulates the input array to compute population statistics
for normalization, and normalizes the input using batch
statistics.
"""
finetune, = argument.parse_kwargs(
kwargs, ('finetune', False),
test='test argument is not supported anymore. '
'Use chainer.using_config')
if self.avg_mean is None:
param_shape = tuple([
d
for i, d in enumerate(x.shape)
if i not in self.axis])
self._initialize_params(param_shape)
gamma = self.gamma
if gamma is None:
with chainer.using_device(self.device):
gamma = self.xp.ones(
self.avg_mean.shape, dtype=x.dtype)
beta = self.beta
if beta is None:
with chainer.using_device(self.device):
beta = self.xp.zeros(
self.avg_mean.shape, dtype=x.dtype)
if configuration.config.train:
if finetune:
self.N += 1
decay = 1. - 1. / self.N
else:
decay = self.decay
avg_mean = self.avg_mean
avg_var = self.avg_var
if chainer.config.in_recomputing:
# Do not update statistics when extra forward computation is
# called.
if finetune:
self.N -= 1 # Revert the count
avg_mean = None
avg_var = None
ret = functions.batch_normalization(
x, gamma, beta, eps=self.eps, running_mean=avg_mean,
running_var=avg_var, decay=decay, axis=self.axis)
else:
# Use running average statistics or fine-tuned statistics.
mean = self.avg_mean
var = self.avg_var
ret = functions.fixed_batch_normalization(
x, gamma, beta, mean, var, self.eps, axis=self.axis)
return ret
def start_finetuning(self):
"""Resets the population count for collecting population statistics.
This method can be skipped if it is the first time to use the
fine-tuning mode. Otherwise, this method should be called before
starting the fine-tuning mode again.
"""
self.N = 0
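# A hedged sketch of the fine-tuning workflow described above (the data loop is
# schematic; `bn` is a BatchNormalization link, `batches` an iterable of input
# arrays):
#
#   bn.start_finetuning()                     # reset N before a fresh pass
#   with chainer.using_config('train', True):
#       for batch in batches:                 # one pass over the training set
#           bn(batch, finetune=True)          # accumulate population statistics
#   with chainer.using_config('train', False):
#       y = bn(test_batch)                    # uses the fine-tuned statistics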
|
|
# -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import cleangenre
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import metacache
from resources.lib.modules import workers
from resources.lib.modules import trakt
import sys,re,json,urllib,urlparse,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict()
action = params.get('action')
control.moderator()
class channels:
def __init__(self):
self.list = [] ; self.items = []
self.uk_datetime = self.uk_datetime()
self.systime = (self.uk_datetime).strftime('%Y%m%d%H%M%S%f')
self.tm_img_link = 'https://image.tmdb.org/t/p/w%s%s'
self.lang = control.apiLanguage()['trakt']
self.sky_now_link = 'http://epgservices.sky.com/5.1.1/api/2.0/channel/json/%s/now/nn/0'
self.sky_programme_link = 'http://tv.sky.com/programme/channel/%s/%s/%s.json'
def get(self):
channels = [
('01', 'Sky Premiere', '4021'),
('02', 'Sky Premiere +1', '1823'),
('03', 'Sky Showcase', '4033'),
('04', 'Sky Greats', '1815'),
('05', 'Sky Disney', '4013'),
('06', 'Sky Family', '4018'),
('07', 'Sky Action', '4014'),
('08', 'Sky Comedy', '4019'),
('09', 'Sky Crime', '4062'),
('10', 'Sky Drama', '4016'),
('11', 'Sky Sci Fi', '4017'),
('12', 'Sky Select', '4020'),
('13', 'Film4', '4044'),
('14', 'Film4 +1', '1629'),
('15', 'TCM', '3811'),
('16', 'TCM +1', '5275')
]
threads = []
for i in channels: threads.append(workers.Thread(self.sky_list, i[0], i[1], i[2]))
[i.start() for i in threads]
[i.join() for i in threads]
threads = []
for i in range(0, len(self.items)): threads.append(workers.Thread(self.items_list, self.items[i]))
[i.start() for i in threads]
[i.join() for i in threads]
self.list = metacache.local(self.list, self.tm_img_link, 'poster2', 'fanart')
try: self.list = sorted(self.list, key=lambda k: k['num'])
except: pass
self.channelDirectory(self.list)
return self.list
def sky_list(self, num, channel, id):
try:
url = self.sky_now_link % id
result = client.request(url, timeout='10')
result = json.loads(result)
match = result['listings'][id][0]['url']
dt1 = (self.uk_datetime).strftime('%Y-%m-%d')
dt2 = int((self.uk_datetime).strftime('%H'))
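            # The Sky programme JSON appears to be paged in four 6-hour blocks
            # per day; map the current hour to a page index 0-3.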
if (dt2 < 6): dt2 = 0
elif (dt2 >= 6 and dt2 < 12): dt2 = 1
elif (dt2 >= 12 and dt2 < 18): dt2 = 2
elif (dt2 >= 18): dt2 = 3
url = self.sky_programme_link % (id, str(dt1), str(dt2))
result = client.request(url, timeout='10')
result = json.loads(result)
result = result['listings'][id]
result = [i for i in result if i['url'] == match][0]
year = result['d']
year = re.findall('[(](\d{4})[)]', year)[0].strip()
year = year.encode('utf-8')
title = result['t']
title = title.replace('(%s)' % year, '').strip()
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
self.items.append((title, year, channel, num))
except:
pass
def items_list(self, i):
try:
item = trakt.SearchAll(i[0], i[1], True)[0]
content = item.get('movie')
if not content: content = item.get('show')
item = content
title = item.get('title')
title = client.replaceHTMLCodes(title)
originaltitle = title
year = item.get('year', 0)
year = re.sub('[^0-9]', '', str(year))
imdb = item.get('ids', {}).get('imdb', '0')
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
tmdb = str(item.get('ids', {}).get('tmdb', 0))
premiered = item.get('released', '0')
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
genre = item.get('genres', [])
genre = [x.title() for x in genre]
genre = ' / '.join(genre).strip()
if not genre: genre = '0'
duration = str(item.get('Runtime', 0))
rating = item.get('rating', '0')
if not rating or rating == '0.0': rating = '0'
votes = item.get('votes', '0')
try: votes = str(format(int(votes), ',d'))
except: pass
mpaa = item.get('certification', '0')
if not mpaa: mpaa = '0'
tagline = item.get('tagline', '0')
plot = item.get('overview', '0')
people = trakt.getPeople(imdb, 'movies')
director = writer = ''
if 'crew' in people and 'directing' in people['crew']:
director = ', '.join([director['person']['name'] for director in people['crew']['directing'] if director['job'].lower() == 'director'])
if 'crew' in people and 'writing' in people['crew']:
writer = ', '.join([writer['person']['name'] for writer in people['crew']['writing'] if writer['job'].lower() in ['writer', 'screenplay', 'author']])
cast = []
for person in people.get('cast', []):
cast.append({'name': person['person']['name'], 'role': person['character']})
cast = [(person['name'], person['role']) for person in cast]
try:
if self.lang == 'en' or self.lang not in item.get('available_translations', [self.lang]): raise Exception()
trans_item = trakt.getMovieTranslation(imdb, self.lang, full=True)
title = trans_item.get('title') or title
tagline = trans_item.get('tagline') or tagline
plot = trans_item.get('overview') or plot
except:
pass
self.list.append({'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'imdb': imdb, 'tmdb': tmdb, 'poster': '0', 'channel': i[2], 'num': i[3]})
except:
pass
def uk_datetime(self):
dt = datetime.datetime.utcnow() + datetime.timedelta(hours = 0)
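        # British Summer Time runs from the last Sunday of March to the last
        # Sunday of October; stepping back (weekday() + 1) days from 1 April
        # and 1 November lands on those Sundays.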
d = datetime.datetime(dt.year, 4, 1)
dston = d - datetime.timedelta(days=d.weekday() + 1)
d = datetime.datetime(dt.year, 11, 1)
dstoff = d - datetime.timedelta(days=d.weekday() + 1)
if dston <= dt < dstoff:
return dt + datetime.timedelta(hours = 1)
else:
return dt
def channelDirectory(self, items):
        if items is None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
try: isOld = False ; control.item().getArt('type')
except: isOld = True
isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'
playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
refreshMenu = control.lang(32072).encode('utf-8')
for i in items:
try:
label = '[B]%s[/B] : %s (%s)' % (i['channel'].upper(), i['title'], i['year'])
sysname = urllib.quote_plus('%s (%s)' % (i['title'], i['year']))
systitle = urllib.quote_plus(i['title'])
imdb, tmdb, year = i['imdb'], i['tmdb'], i['year']
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'code': imdb, 'imdbnumber': imdb, 'imdb_id': imdb})
meta.update({'tmdb_id': tmdb})
meta.update({'mediatype': 'movie'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
#meta.update({'trailer': 'plugin://script.extendedinfo/?info=playtrailer&&id=%s' % imdb})
meta.update({'playcount': 0, 'overlay': 6})
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
cm.append((refreshMenu, 'RunPlugin(%s?action=refresh)' % sysaddon))
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
                if isOld:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
item = control.item(label=label)
art = {}
if 'poster2' in i and not i['poster2'] == '0':
art.update({'icon': i['poster2'], 'thumb': i['poster2'], 'poster': i['poster2']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster'], 'poster': i['poster']})
else:
art.update({'icon': addonPoster, 'thumb': addonPoster, 'poster': addonPoster})
art.update({'banner': addonBanner})
if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setProperty('IsPlayable', isPlayable)
item.setInfo(type='Video', infoLabels = meta)
video_streaminfo = {'codec': 'h264'}
item.addStreamInfo('video', video_streaminfo)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False)
except:
pass
control.content(syshandle, 'files')
control.directory(syshandle, cacheToDisc=True)
|
|
# ------------------------------------------------------------------------------
# @brief:
#   The NerveNet-plus model, where truncated backpropagation is used to optimize the weights.
# @Input:
# _input_obs: [num_prop_steps * batch_size, ob_size]
# _input_hidden_state: [batch_size, hidden_dim],
# _input_parameters: [batch_size, param_size_dict]
# @Output:
# _action_mu_output: [num_prop_steps * batch_size, ac_size]
# _action_dist_logstd_param: [num_prop_steps * batch_size, ac_size]
# ------------------------------------------------------------------------------
import init_path
import tensorflow as tf
import numpy as np
from .policy_network import policy_network
# from util import logger
from graph_util import mujoco_parser
from graph_util import gnn_util
from graph_util import gen_gnn_param
from util import nn_cells as nn
from six.moves import xrange
class nervenet(policy_network):
def __init__(self, session, name_scope,
input_size, output_size,
adj_matrix, node_attr,
weight_init_methods='orthogonal',
is_rollout_agent=True,
args=None):
self._node_update_method = args.node_update_method
self.adj_matrix = adj_matrix
self.node_attr = node_attr
policy_network.__init__(
self,
session,
name_scope,
input_size,
output_size,
ob_placeholder=None,
trainable=True,
build_network_now=False,
define_std=True,
args=args
)
self._base_dir = init_path.get_abs_base_dir()
self._root_connection_option = args.root_connection_option
self._num_prop_steps = args.gnn_num_prop_steps
self._init_method = weight_init_methods
self._gnn_node_option = args.gnn_node_option
self._gnn_output_option = args.gnn_output_option
self._gnn_embedding_option = args.gnn_embedding_option
self.is_rollout_agent = is_rollout_agent
self._nstep = 1 if self.is_rollout_agent else self._num_prop_steps
# parse the network shape and do validation check
self._network_shape = args.network_shape
self._hidden_dim = args.gnn_node_hidden_dim
self._input_feat_dim = args.gnn_input_feat_dim
self._seed = args.seed
self._npr = np.random.RandomState(args.seed)
assert self._input_feat_dim == self._hidden_dim
self._build_model()
def _build_model(self):
'''
@brief: everything about the network goes here
'''
with tf.get_default_graph().as_default():
tf.set_random_seed(self._seed)
# record the iteration count
self._iteration = tf.Variable(0, trainable=False, name='step')
# read from the xml files
self._parse_mujoco_template()
# prepare the network's input and output
self._prepare_placeholders()
# define the network here
self._build_network_weights()
self._build_network_graph()
# get the variable list ready
self._set_var_list()
def _prepare_placeholders(self):
'''
@brief:
                get the input placeholders ready. The _input placeholders have a
                different size from the input used for the general network.
'''
# step 1: build the input_obs and input_parameters
self._input_obs = {
node_type: tf.placeholder(
tf.float32,
[None, self._node_info['ob_size_dict'][node_type]],
name='input_ob_placeholder_ggnn'
)
for node_type in self._node_info['node_type_dict']
}
self._input_hidden_state = {
node_type: tf.placeholder(
tf.float32,
[None, self._hidden_dim],
name='input_hidden_dim_' + node_type
)
for node_type in self._node_info['node_type_dict']
}
input_parameter_dtype = tf.int32 \
if 'noninput' in self._gnn_embedding_option else tf.float32
self._input_parameters = {
node_type: tf.placeholder(
input_parameter_dtype,
[None, self._node_info['para_size_dict'][node_type]],
name='input_para_placeholder_ggnn')
for node_type in self._node_info['node_type_dict']
}
# step 2: the receive and send index
        self._receive_idx = tf.placeholder(
            tf.int32, shape=(None,), name='receive_idx'
        )
        self._send_idx = {
            edge_type: tf.placeholder(
                tf.int32, shape=(None,),
                name='send_idx_{}'.format(edge_type))
            for edge_type in self._node_info['edge_type_list']
        }
        # step 3: the node type index and inverse node type index
        self._node_type_idx = {
            node_type: tf.placeholder(
                tf.int32, shape=(None,),
                name='node_type_idx_{}'.format(node_type))
            for node_type in self._node_info['node_type_dict']
        }
        self._inverse_node_type_idx = tf.placeholder(
            tf.int32, shape=(None,), name='inverse_node_type_idx'
        )
        # step 4: the output node index
        self._output_type_idx = {
            output_type: tf.placeholder(
                tf.int32, shape=(None,),
                name='output_type_idx_{}'.format(output_type)
            )
            for output_type in self._node_info['output_type_dict']
        }
        self._inverse_output_type_idx = tf.placeholder(
            tf.int32, shape=(None,), name='inverse_output_type_idx'
        )
        # step 5: batch_size
        self._batch_size_int = tf.placeholder(
            tf.int32, shape=(), name='batch_size_int'
        )
def _build_network_weights(self):
'''
@brief: build the network
@weights:
_MLP_embedding (1 layer)
_MLP_ob_mapping (1 layer)
_MLP_prop (2 layer)
_MLP_output (2 layer)
'''
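        # Shape flow through these weights for a hypothetical setting with
        # gnn_input_feat_dim = gnn_node_hidden_dim = 64 (the assert in
        # __init__ forces the two to be equal; the sizes are illustrative):
        #   embedding MLP:  (None, para_size) -> (None, 32)
        #   ob mapping MLP: (None, ob_size)   -> (None, 32)
        #   GRU update:     input (None, 64) with hidden (None, 64) -> (None, 64)
        #   output MLP:     (None, 64) -> (None, gnn_output_per_node)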
# step 1: build the weight parameters (mlp, gru)
with tf.variable_scope(self._name_scope):
# step 1_1: build the embedding matrix (mlp)
            # tensor shape (None, para_size) --> (None, input_feat_dim / 2),
            # or (None, input_feat_dim) for node types with no observation
assert self._input_feat_dim % 2 == 0
if 'noninput' not in self._gnn_embedding_option:
self._MLP_embedding = {
node_type: nn.MLP(
                        [self._input_feat_dim // 2,
self._node_info['para_size_dict'][node_type]],
init_method=self._init_method,
act_func=['tanh'] * 1, # one layer at most
add_bias=True,
scope='MLP_embedding_node_type_{}'.format(node_type)
)
for node_type in self._node_info['node_type_dict']
if self._node_info['ob_size_dict'][node_type] > 0
}
self._MLP_embedding.update({
node_type: nn.MLP(
[self._input_feat_dim,
self._node_info['para_size_dict'][node_type]],
init_method=self._init_method,
act_func=['tanh'] * 1, # one layer at most
add_bias=True,
scope='MLP_embedding_node_type_{}'.format(node_type)
)
for node_type in self._node_info['node_type_dict']
if self._node_info['ob_size_dict'][node_type] == 0
})
else:
embedding_vec_size = max(
np.reshape(
[max(self._node_info['node_parameters'][i_key])
for i_key in self._node_info['node_parameters']],
[-1]
)
) + 1
embedding_vec_size = int(embedding_vec_size)
self._embedding_variable = {}
out = self._npr.randn(
embedding_vec_size, int(self._input_feat_dim / 2)
).astype(np.float32)
out *= 1.0 / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
self._embedding_variable[False] = tf.Variable(
out, name='embedding_HALF', trainable=self._trainable
)
if np.any([node_size == 0 for _, node_size
in self._node_info['ob_size_dict'].items()]):
out = self._npr.randn(
embedding_vec_size, self._input_feat_dim
).astype(np.float32)
out *= 1.0 / np.sqrt(np.square(out).sum(axis=0,
keepdims=True))
self._embedding_variable[True] = tf.Variable(
out, name='embedding_FULL', trainable=self._trainable
)
# step 1_2: build the ob mapping matrix (mlp)
            # tensor shape (None, ob_size) --> (None, input_feat_dim / 2)
self._MLP_ob_mapping = {
node_type: nn.MLP(
                    [self._input_feat_dim // 2,
self._node_info['ob_size_dict'][node_type]],
init_method=self._init_method,
act_func=['tanh'] * 1, # one layer at most
add_bias=True,
scope='MLP_embedding_node_type_{}'.format(node_type)
)
for node_type in self._node_info['node_type_dict']
if self._node_info['ob_size_dict'][node_type] > 0
}
            # step 1_4: build the mlp for the propagation between nodes
'''
MLP_prop_shape = self._network_shape + \
[self._hidden_dim] + [self._hidden_dim]
self._MLP_prop = {
i_edge: nn.MLP(
MLP_prop_shape,
init_method=self._init_method,
act_func=['tanh'] * (len(MLP_prop_shape) - 1),
add_bias=True,
scope='MLP_prop_edge_{}'.format(i_edge)
)
for i_edge in self._node_info['edge_type_list']
}
'''
# step 1_5: build the node update function for each node type
if self._node_update_method == 'GRU':
self._Node_update = {
i_node_type: nn.GRU(
self._hidden_dim, # for both the message and ob
self._hidden_dim,
init_method=self._init_method,
scope='GRU_node_{}'.format(i_node_type)
)
for i_node_type in self._node_info['node_type_dict']
}
else:
assert False
# step 1_6: the mlp for the mu of the actions
# (l_1, l_2, ..., l_o, l_i)
MLP_out_shape = self._network_shape + \
[self.args.gnn_output_per_node] + [self._hidden_dim]
MLP_out_act_func = ['tanh'] * (len(MLP_out_shape) - 1)
MLP_out_act_func[-1] = None
self._MLP_Out = {
output_type: nn.MLP(
MLP_out_shape,
init_method=self._init_method,
act_func=MLP_out_act_func,
add_bias=True,
scope='MLP_out'
)
for output_type in self._node_info['output_type_dict']
}
# step 1_8: build the log std for the actions
self._action_dist_logstd = tf.Variable(
(0.0 * self._npr.randn(1, self._output_size)).astype(
np.float32
),
name="policy_logstd",
trainable=self._trainable
)
def _build_input_graph(self):
if 'noninput' not in self._gnn_embedding_option:
self._input_embedding = {
node_type: self._MLP_embedding[node_type](
self._input_parameters[node_type]
)[-1]
for node_type in self._node_info['node_type_dict']
}
else:
self._input_embedding = {
node_type: tf.gather(
self._embedding_variable[
self._node_info['ob_size_dict'][node_type] == 0
],
tf.reshape(self._input_parameters[node_type], [-1])
)
for node_type in self._node_info['node_type_dict']
}
# shape: [n_step, node_num, embedding_size + ob_size]
self._ob_feat = {
node_type: self._MLP_ob_mapping[node_type](
self._input_obs[node_type]
)[-1]
for node_type in self._node_info['node_type_dict']
if self._node_info['ob_size_dict'][node_type] > 0
}
self._ob_feat.update({
node_type: self._input_obs[node_type]
for node_type in self._node_info['node_type_dict']
if self._node_info['ob_size_dict'][node_type] == 0
})
self._input_feat = {
node_type: tf.concat([
tf.reshape(
self._input_embedding[node_type],
[-1, self._nstep *
len(self._node_info['node_type_dict'][node_type]),
int(self._input_feat_dim / 2)],
),
tf.reshape(
self._ob_feat[node_type],
[-1, self._nstep *
len(self._node_info['node_type_dict'][node_type]),
int(self._input_feat_dim / 2)],
)
], axis=2)
for node_type in self._node_info['node_type_dict']
}
split_feat_list = {
node_type: tf.split(
self._input_feat[node_type],
self._nstep,
axis=1,
name='split_into_nstep' + node_type
)
for node_type in self._node_info['node_type_dict']
}
feat_list = []
for i_step in range(self._nstep):
# for node_type in self._node_info['node_type_dict']:
feat_list.append(
tf.concat(
[tf.reshape(split_feat_list[node_type][i_step],
[-1, self._input_feat_dim])
for node_type in self._node_info['node_type_dict']],
axis=0 # the node
)
)
self._input_feat_list = [
tf.gather( # get node order into graph order
i_step_data,
self._inverse_node_type_idx,
name='get_order_back_gather_init' + str(i_step),
)
for i_step, i_step_data in enumerate(feat_list)
]
current_hidden_state = tf.concat(
[self._input_hidden_state[node_type]
for node_type in self._node_info['node_type_dict']],
axis=0
)
current_hidden_state = tf.gather( # get node order into graph order
current_hidden_state,
self._inverse_node_type_idx,
name='get_order_back_gather_init'
)
return current_hidden_state
def _build_network_graph(self):
current_hidden_state = self._build_input_graph()
        # step 3: unroll the propagation
self._action_mu_output = [] # [nstep, None, n_action_size]
        for tt in range(self._nstep):
current_input_feat = self._input_feat_list[tt]
'''
self._prop_msg = []
for ee, i_edge_type in enumerate(self._node_info['edge_type_list']):
node_activate = \
tf.gather(
current_input_feat,
self._send_idx[i_edge_type],
name='edge_id_{}_prop_steps_{}'.format(i_edge_type, tt)
)
self._prop_msg.append(
self._MLP_prop[i_edge_type](node_activate)[-1]
)
# aggregate messages
concat_msg = tf.concat(self._prop_msg, 0)
self.concat_msg = concat_msg
message = tf.unsorted_segment_sum(
concat_msg, self._receive_idx,
self._node_info['num_nodes'] * self._batch_size_int
)
denom_const = tf.unsorted_segment_sum(
tf.ones_like(concat_msg), self._receive_idx,
self._node_info['num_nodes'] * self._batch_size_int
)
message = tf.div(message, (denom_const + tf.constant(1.0e-10)))
node_update_input = tf.concat([message, current_input_feat], axis=1,
name='ddbug' + str(tt))
'''
node_update_input = current_input_feat
# update the hidden states via GRU
new_state = []
for i_node_type in self._node_info['node_type_dict']:
new_state.append(
self._Node_update[i_node_type](
tf.gather(
node_update_input,
self._node_type_idx[i_node_type],
name='GRU_message_node_type_{}_prop_step_{}'.format(
i_node_type, tt
)
),
tf.gather(
current_hidden_state,
self._node_type_idx[i_node_type],
name='GRU_feat_node_type_{}_prop_steps_{}'.format(
i_node_type, tt
)
)
)
)
self.output_hidden_state = {
node_type: new_state[i_id]
for i_id, node_type
in enumerate(self._node_info['node_type_dict'])
}
            new_state = tf.concat(new_state, 0)  # still grouped by node type; graph order is restored below
# now, get the orders back
current_hidden_state = tf.gather(
new_state, self._inverse_node_type_idx,
name='get_order_back_gather_prop_steps_{}'.format(tt)
)
action_mu_output = []
for output_type in self._node_info['output_type_dict']:
action_mu_output.append(
self._MLP_Out[output_type](
tf.gather(
current_hidden_state,
self._output_type_idx[output_type],
name='output_type_{}'.format(output_type)
)
)[-1]
)
action_mu_output = tf.concat(action_mu_output, 0)
action_mu_output = tf.gather(
action_mu_output,
self._inverse_output_type_idx,
name='output_inverse'
)
action_mu_output = tf.reshape(action_mu_output,
[self._batch_size_int, -1])
self._action_mu_output.append(action_mu_output)
self._action_mu_output = tf.reshape(
tf.concat(self._action_mu_output, axis=1), [-1, self._output_size]
)
# step 4: build the log std for the actions
self._action_dist_logstd_param = tf.reshape(
tf.tile(
tf.reshape(self._action_dist_logstd, [1, 1, self._output_size],
name='test'),
[self._nstep, self._batch_size_int, 1]
), [-1, self._output_size]
)
def _parse_mujoco_template(self):
'''
@brief:
In this function, we construct the dict for node information.
The structure is _node_info
@attribute:
            1. general information about the graph
@self._node_info['tree']
@self._node_info['debug_info']
@self._node_info['relation_matrix']
2. information about input output
@self._node_info['input_dict']:
self._node_info['input_dict'][id_of_node] is a list of
ob positions
@self._node_info['output_list']
3. information about the node
@self._node_info['node_type_dict']:
self._node_info['node_type_dict']['body'] is a list of
node id
@self._node_info['num_nodes']
4. information about the edge
@self._node_info['edge_type_list'] = self._edge_type_list
the list of edge ids
@self._node_info['num_edges']
@self._node_info['num_edge_type']
            5. information about the index
                @self._node_info['node_in_graph_list']
                    The order of nodes when placed by types ('joint', 'body')
@self._node_info['inverse_node_list']
The inverse of 'node_in_graph_list'
@self._node_info['receive_idx'] = receive_idx
@self._node_info['receive_idx_raw'] = receive_idx_raw
@self._node_info['send_idx'] = send_idx
            6. information about the embedding size and ob size
@self._node_info['para_size_dict']
@self._node_info['ob_size_dict']
self._node_info['ob_size_dict']['root'] = 10
self._node_info['ob_size_dict']['joint'] = 6
'''
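        # A hypothetical instance of the structure documented above, for a toy
        # two-joint model (actual keys and sizes depend on the parsed model):
        #   self._node_info['node_type_dict'] = {'root': [0], 'joint': [1, 2]}
        #   self._node_info['num_nodes'] = 3
        #   self._node_info['ob_size_dict'] = {'root': 10, 'joint': 6}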
# step 0: parse the mujoco xml
if 'evo' in self.args.task:
self._node_info = gen_gnn_param.gen_gnn_param(
self._task_name,
self.adj_matrix,
self.node_attr,
gnn_node_option=self._gnn_node_option,
root_connection_option=self._root_connection_option,
gnn_output_option=self._gnn_output_option,
gnn_embedding_option=self._gnn_embedding_option
)
else:
self._node_info = mujoco_parser.parse_mujoco_graph(
self._task_name,
gnn_node_option=self._gnn_node_option,
root_connection_option=self._root_connection_option,
gnn_output_option=self._gnn_output_option,
gnn_embedding_option=self._gnn_embedding_option
)
# step 1: check that the input and output size is matched
gnn_util.io_size_check(self._input_size, self._output_size,
self._node_info, self._is_baseline)
# step 2: check for ob size for each node type, construct the node dict
self._node_info = gnn_util.construct_ob_size_dict(self._node_info,
self._input_feat_dim)
# step 3: get the inverse node offsets (used to construct gather idx)
self._node_info = gnn_util.get_inverse_type_offset(self._node_info,
'node')
# step 4: get the inverse node offsets (used to gather output idx)
self._node_info = gnn_util.get_inverse_type_offset(self._node_info,
'output')
# step 5: register existing edge and get the receive and send index
self._node_info = gnn_util.get_receive_send_idx(self._node_info)
def get_num_nodes(self):
return self._node_info['num_nodes']
def get_logstd(self):
return self._action_dist_logstd
def _set_var_list(self):
# collect the tf variable and the trainable tf variable
self._trainable_var_list = [var for var in tf.trainable_variables()
if self._name_scope in var.name]
self._all_var_list = [var for var in tf.global_variables()
if self._name_scope in var.name]
def get_node_info(self):
return self._node_info
def get_gnn_idx_placeholder(self):
'''
@brief: return the placeholders to the agent to construct feed dict
'''
return self._receive_idx, self._send_idx, \
self._node_type_idx, self._inverse_node_type_idx, \
self._output_type_idx, self._inverse_output_type_idx, \
self._batch_size_int
def get_input_obs_placeholder(self):
return self._input_obs
def get_input_parameters_placeholder(self):
return self._input_parameters
def get_output_hidden_state_list(self):
return [self.output_hidden_state[key]
for key in self._node_info['node_type_dict']]
def get_input_hidden_state_placeholder(self):
'''
self._input_hidden_state = {
node_type: tf.placeholder(
tf.float32,
[None, self._hidden_dim],
name='input_hidden_dim'
)
for node_type in self._node_info['node_type_dict']
}
'''
return self._input_hidden_state
|
|
# encoding: utf-8
"""
Parser for Compact XML Expression Language (CXEL) ('see-ex-ell'), a compact
XML specification language I made up that's useful for producing XML element
trees suitable for unit testing.
"""
from __future__ import print_function
from pyparsing import (
alphas, alphanums, Combine, dblQuotedString, delimitedList, Forward,
Group, Literal, Optional, removeQuotes, stringEnd, Suppress, Word
)
from docx.oxml import parse_xml
from docx.oxml.ns import nsmap
# ====================================================================
# api functions
# ====================================================================
def element(cxel_str):
"""
Return an oxml element parsed from the XML generated from *cxel_str*.
"""
_xml = xml(cxel_str)
return parse_xml(_xml)
def xml(cxel_str):
"""
Return the XML generated from *cxel_str*.
"""
root_token = root_node.parseString(cxel_str)
xml = root_token.element.xml
return xml
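# Quick illustration of the notation (the tagnames are arbitrary
# WordprocessingML examples, not special to the parser): the CXEL string
#
#     w:p/(w:pPr/w:jc{w:val=right}, w:r/w:t"foo")
#
# parses to a <w:p> root element (with its namespace declared) containing
# a <w:pPr> child that holds an empty <w:jc w:val="right"/> element, and
# a <w:r> child that holds <w:t>foo</w:t>.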
# ====================================================================
# internals
# ====================================================================
def nsdecls(*nspfxs):
"""
Return a string containing a namespace declaration for each of *nspfxs*,
in the order they are specified.
"""
nsdecls = ''
for nspfx in nspfxs:
nsdecls += ' xmlns:%s="%s"' % (nspfx, nsmap[nspfx])
return nsdecls
class Element(object):
"""
Represents an XML element, having a namespace, tagname, attributes, and
may contain either text or children (but not both) or may be empty.
"""
def __init__(self, tagname, attrs, text):
self._tagname = tagname
self._attrs = attrs
self._text = text
self._children = []
self._is_root = False
def __repr__(self):
"""
Provide a more meaningful repr value for an Element object, one that
displays the tagname as a simple empty element, e.g. ``<w:pPr/>``.
"""
return "<%s/>" % self._tagname
def connect_children(self, child_node_list):
"""
Make each of the elements appearing in *child_node_list* a child of
this element.
"""
for node in child_node_list:
child = node.element
self._children.append(child)
@classmethod
def from_token(cls, token):
"""
Return an ``Element`` object constructed from a parser element token.
"""
tagname = token.tagname
attrs = [(name, value) for name, value in token.attr_list]
text = token.text
return cls(tagname, attrs, text)
@property
def is_root(self):
"""
|True| if this element is the root of the tree and should include the
namespace prefixes. |False| otherwise.
"""
return self._is_root
@is_root.setter
def is_root(self, value):
self._is_root = bool(value)
@property
def nspfx(self):
"""
The namespace prefix of this element, the empty string (``''``) if
the tag is in the default namespace.
"""
tagname = self._tagname
idx = tagname.find(':')
if idx == -1:
return ''
return tagname[:idx]
@property
def nspfxs(self):
"""
A sequence containing each of the namespace prefixes appearing in
this tree. Each prefix appears once and only once, and in document
order.
"""
def merge(seq, seq_2):
for item in seq_2:
if item in seq:
continue
seq.append(item)
nspfxs = [self.nspfx]
for child in self._children:
merge(nspfxs, child.nspfxs)
return nspfxs
@property
def xml(self):
"""
The XML corresponding to the tree rooted at this element,
        pretty-printed using 2-space indentation at each level and with
        a trailing newline.
"""
return self._xml(indent=0)
def _xml(self, indent):
"""
Return a string containing the XML of this element and all its
children with a starting indent of *indent* spaces.
"""
self._indent_str = ' ' * indent
xml = self._start_tag
for child in self._children:
xml += child._xml(indent+2)
xml += self._end_tag
return xml
@property
def _start_tag(self):
"""
The text of the opening tag of this element, including attributes. If
this is the root element, a namespace declaration for each of the
namespace prefixes that occur in this tree is added in front of any
attributes. If this element contains text, that text follows the
start tag. If not, and this element has no children, an empty tag is
returned. Otherwise, an opening tag is returned, followed by
a newline. The tag is indented by this element's indent value in all
cases.
"""
_nsdecls = nsdecls(*self.nspfxs) if self.is_root else ''
tag = '%s<%s%s' % (self._indent_str, self._tagname, _nsdecls)
for attr in self._attrs:
name, value = attr
tag += ' %s="%s"' % (name, value)
if self._text:
tag += '>%s' % self._text
elif self._children:
tag += '>\n'
else:
tag += '/>\n'
return tag
@property
def _end_tag(self):
"""
The text of the closing tag of this element, if there is one. If the
element contains text, no leading indentation is included.
"""
if self._text:
tag = '</%s>\n' % self._tagname
elif self._children:
tag = '%s</%s>\n' % (self._indent_str, self._tagname)
else:
tag = ''
return tag
# ====================================================================
# parser
# ====================================================================
# parse actions ----------------------------------
def connect_node_children(s, loc, tokens):
node = tokens[0]
node.element.connect_children(node.child_node_list)
def connect_root_node_children(root_node):
root_node.element.connect_children(root_node.child_node_list)
root_node.element.is_root = True
def grammar():
# terminals ----------------------------------
colon = Literal(':')
equal = Suppress('=')
slash = Suppress('/')
open_paren = Suppress('(')
close_paren = Suppress(')')
open_brace = Suppress('{')
close_brace = Suppress('}')
# np:tagName ---------------------------------
nspfx = Word(alphas)
local_name = Word(alphas)
tagname = Combine(nspfx + colon + local_name)
# np:attr_name=attr_val ----------------------
attr_name = Word(alphas + ':')
attr_val = Word(alphanums + '-.%')
attr_def = Group(attr_name + equal + attr_val)
attr_list = open_brace + delimitedList(attr_def) + close_brace
text = dblQuotedString.setParseAction(removeQuotes)
# w:jc{val=right} ----------------------------
element = (
tagname('tagname')
+ Group(Optional(attr_list))('attr_list')
+ Optional(text, default='')('text')
).setParseAction(Element.from_token)
child_node_list = Forward()
node = Group(
element('element')
+ Group(Optional(slash + child_node_list))('child_node_list')
).setParseAction(connect_node_children)
child_node_list << (
open_paren + delimitedList(node) + close_paren
| node
)
root_node = (
element('element')
+ Group(Optional(slash + child_node_list))('child_node_list')
+ stringEnd
).setParseAction(connect_root_node_children)
return root_node
root_node = grammar()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewayConnectionsOperations(object):
"""VirtualNetworkGatewayConnectionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-09-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGatewayConnection')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a virtual network gateway connection in the
specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the create or update virtual
network gateway connection operation.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VirtualNetworkGatewayConnection or
ClientRawResponse<VirtualNetworkGatewayConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnection]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}
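    # Minimal usage sketch (the client construction is hypothetical and not
    # part of this module):
    #
    #     poller = network_client.virtual_network_gateway_connections.create_or_update(
    #         'my-resource-group', 'my-connection', connection_parameters)
    #     connection = poller.result()  # blocks until the LRO completes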
def get(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway connection by resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetworkGatewayConnection or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnection
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}
def _delete_initial(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified virtual network Gateway connection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}
def _update_tags_initial(
self, resource_group_name, virtual_network_gateway_connection_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayConnectionListEntity', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_network_gateway_connection_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual network gateway connection tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VirtualNetworkGatewayConnectionListEntity or
ClientRawResponse<VirtualNetworkGatewayConnectionListEntity> if
raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionListEntity]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionListEntity]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGatewayConnectionListEntity', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}
def _set_shared_key_initial(
self, resource_group_name, virtual_network_gateway_connection_name, value, custom_headers=None, raw=False, **operation_config):
parameters = models.ConnectionSharedKey(value=value)
# Construct URL
url = self.set_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ConnectionSharedKey')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionSharedKey', response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_shared_key(
self, resource_group_name, virtual_network_gateway_connection_name, value, custom_headers=None, raw=False, polling=True, **operation_config):
"""The Put VirtualNetworkGatewayConnectionSharedKey operation sets the
virtual network gateway connection shared key for passed virtual
network gateway connection in the specified resource group through
Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network
gateway connection name.
:type virtual_network_gateway_connection_name: str
:param value: The virtual network connection shared key value.
:type value: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ConnectionSharedKey or
ClientRawResponse<ConnectionSharedKey> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.ConnectionSharedKey]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.ConnectionSharedKey]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._set_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
value=value,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'}
def get_shared_key(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
"""The Get VirtualNetworkGatewayConnectionSharedKey operation retrieves
information about the specified virtual network gateway connection
        shared key through the Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network
gateway connection shared key name.
:type virtual_network_gateway_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConnectionSharedKey or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_09_01.models.ConnectionSharedKey or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""The List VirtualNetworkGatewayConnections operation retrieves all the
        virtual network gateway connections created.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetworkGatewayConnection
:rtype:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionPaged[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnection]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkGatewayConnectionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkGatewayConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections'}
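    # Usage sketch (hypothetical client): the returned paged object is an
    # iterator that transparently follows next_link across result pages:
    #
    #     for connection in network_client.virtual_network_gateway_connections.list('my-rg'):
    #         print(connection.name)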
def _reset_shared_key_initial(
self, resource_group_name, virtual_network_gateway_connection_name, key_length, custom_headers=None, raw=False, **operation_config):
parameters = models.ConnectionResetSharedKey(key_length=key_length)
# Construct URL
url = self.reset_shared_key.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ConnectionResetSharedKey')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionResetSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reset_shared_key(
self, resource_group_name, virtual_network_gateway_connection_name, key_length, custom_headers=None, raw=False, polling=True, **operation_config):
"""The VirtualNetworkGatewayConnectionResetSharedKey operation resets the
        virtual network gateway connection shared key for the passed virtual
        network gateway connection in the specified resource group through
        the Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network
gateway connection reset shared key Name.
:type virtual_network_gateway_connection_name: str
:param key_length: The virtual network connection reset shared key
        length, which should be between 1 and 128.
:type key_length: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ConnectionResetSharedKey or
ClientRawResponse<ConnectionResetSharedKey> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.ConnectionResetSharedKey]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.ConnectionResetSharedKey]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
key_length=key_length,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectionResetSharedKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset'}
|
|
# Copyright 2014-2019 M. A. Zentile, J. Keaveney, L. Weller, D. Whiting,
# C. S. Adams and I. G. Hughes.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Random restart fitting routine.
Fits by taking random samples of starting points around the given
parameters and then fitting each with Marquardt-Levenberg.
Complete rebuild of the original RR fitting module, now using lmfit.
Author: JK
Last updated 2018-02-21 MAZ
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
import numpy as np
import matplotlib.pyplot as plt
import warnings
import sys
import copy
import psutil
from multiprocessing import Pool
import MLFittingRoutine as ML
import lmfit as lm
from spectra import get_spectra
p_dict_bounds_default = {'lcell':1e-3,'Bfield':100., 'T':20.,
'GammaBuf':20., 'shift':100.,
# Polarisation of light
'theta0':10., 'E_x':0.05, 'E_y':0.05, 'E_phase':0.01,
# B-field angle w.r.t. light k-vector
'Btheta':10*3.14/180, 'Bphi':10*3.14/180,
'DoppTemp':20.,
'rb85frac':1, 'K40frac':1, 'K41frac':1,
}
def evaluate(args):
data = args[0]
E_in = args[1]
p_dict = args[2]
p_dict_bools = args[3]
data_type = args[4]
best_params, result = ML.ML_fit(data, E_in, p_dict, p_dict_bools, data_type)
# returns reduced chi-squared value and best fit parameters
return result.redchi, best_params #, result
def RR_fit(data,E_in,p_dict,p_dict_bools,p_dict_bounds=None,no_evals=None,data_type='S0',verbose=False):
"""
Random restart fitting method.
    data: a length-2 iterable holding the x and y data arrays to be fitted
E_in: the initial electric field input. See docstring for the spectra.py module for details.
    no_evals: The number of randomly-selected start points for downhill fitting. Defaults to nFitParams**2 + 5, where nFitParams is
        the number of varying fit parameters
p_dict: dictionary containing all the calculation (initial) parameters
p_dict_bools: dictionary with the same keys as p_dict, with Boolean values representing each parameter that is to be varied in the fitting
p_dict_bounds: dictionary with the same keys as p_dict, with values that are pairs of min/max values that each parameter can take.
NOTE: this works slightly differently to p_dict_bounds in the other fitting methods. In RR fitting, the bounds
select the range in parameter space that is randomly explored as starting parameters for a downhill fit, rather than being
strict bounds on the fit parameters.
data_type: Data type to fit experimental data to. Can be one of:
'S0', 'S1', 'S2', 'S3', 'Ix', 'Iy', ...
verbose: Boolean - more print statements provided as the program progresses
"""
if p_dict_bounds is None:
p_dict_bounds = p_dict_bounds_default
print('Starting Random Restart Fitting Routine')
x = np.array(data[0])
y = np.array(data[1])
p_dict['E_x'] = E_in[0]
p_dict['E_y'] = E_in[1][0]
p_dict['E_phase'] = E_in[1][1]
# count number of fit parameters
nFitParams = 0
for key in p_dict_bools:
if p_dict_bools[key]: nFitParams += 1
# default number of iterations based on number of fit parameters
    if no_evals is None:
        no_evals = nFitParams**2 + 5  # previously 2**(3+2*nFitParams)
# Create random array of starting parameters based on parameter ranges given in p_dict range dictionary
# Scattered uniformly over the parameter space
#clone the parameter dictionary
p_dict_list = []
for i in range(no_evals):
p_dict_list.append(copy.deepcopy(p_dict))
for key in p_dict_bools:
        if p_dict_bools[key]:
            start_vals = p_dict[key]
for i in range(len(p_dict_list)):
p_dict_list[i][key] = start_vals + np.random.uniform(-1,1) * p_dict_bounds[key]
if verbose:
print('List of initial parameter dictionaries:')
for pd in p_dict_list:
print(pd)
print('\n\n')
#Do parallel ML fitting by utilising multiple cores
po = Pool() # Pool() uses all cores, Pool(3) uses 3 cores for example.
## use lower process priority so computer is still responsive while calculating!!
# parent = psutil.Process()
# parent.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
# for child in parent.children():
# child.nice(psutil.IDLE_PRIORITY_CLASS)
args_list = [(data, E_in, p_dict_list[k], p_dict_bools, data_type) for k in range(no_evals)]
Res = po.map_async(evaluate,args_list)
result = Res.get()
po.close()
po.join()
if verbose: print('RR calculation complete')
#Find best fit
result = np.array(result)
#result = result.astype(np.float64)
lineMin = np.argmin(result[:,0]) ## pick the fit with the lowest cost value
best_values = result[lineMin][1] # best parameter dictionary
if verbose:
print('\n\n\n')
print(best_values)
p_dict_best = copy.deepcopy(p_dict)
p_dict_best.update(best_values)
# Finally run the ML fitting one more time, using the best parameters
# (so we get the final_result object, which cannot be pickled and therefore isn't supported in multiprocessing)
best_values, final_result = ML.ML_fit(data, E_in, p_dict_best, p_dict_bools, data_type)
# return best fit parameters, and the lmfit result object
return best_values, final_result
def test_fit():
p_dict = {'Elem':'Rb','Dline':'D2','T':80.,'lcell':2e-3,'Bfield':600.,'Btheta':0.,
'Bphi':0.,'GammaBuf':0.,'shift':0.}
# only need to specify parameters that are varied
p_dict_bools = {'T':True,'Bfield':True,'E_x':True}
p_dict_bounds = {'T':10,'Bfield':100,'E_x':0.01}
E_in = np.array([0.7,0.7,0])
E_in_angle = [E_in[0].real,[abs(E_in[1]),np.angle(E_in[1])]]
print(E_in_angle)
x = np.linspace(-10000,10000,100)
[y] = get_spectra(x,E_in,p_dict,outputs=['S1']) + np.random.randn(len(x))*0.015
data = [x,y.real]
best_params, result = RR_fit(data, E_in_angle, p_dict, p_dict_bools, p_dict_bounds, no_evals = 8, data_type='S1')
report = result.fit_report()
fit = result.best_fit
print(report)
plt.plot(x,y,'ko')
plt.plot(x,fit,'r-',lw=2)
plt.show()
if __name__ == '__main__':
test_fit()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timing benchmark for AlexNet inference.
To run, use:
bazel run -c opt --config=cuda \
third_party/tensorflow/models/image/alexnet:alexnet_benchmark
Across 100 steps on batch size = 128.
Forward pass:
Run on Tesla K40c: 145 +/- 1.5 ms / batch
Run on Titan X: 70 +/- 0.1 ms / batch
Forward-backward pass:
Run on Tesla K40c: 480 +/- 48 ms / batch
Run on Titan X: 244 +/- 30 ms / batch
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 100,
"""Number of batches to run.""")
def print_activations(t):
print(t.op.name, ' ', t.get_shape().as_list())
def inference(images):
"""Build the AlexNet model.
Args:
images: Images Tensor
Returns:
pool5: the last Tensor in the convolutional component of AlexNet.
parameters: a list of Tensors corresponding to the weights and biases of the
AlexNet model.
"""
parameters = []
# conv1
with tf.name_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope)
print_activations(conv1)
parameters += [kernel, biases]
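  # Each conv block below repeats this pattern: truncated-normal weights, zero-initialised
  # biases, conv2d + bias_add + relu, with the weights and biases appended to `parameters`
  # so the backward benchmark can compute gradients with respect to them.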
# lrn1
# TODO(shlens, jiayq): Add a GPU version of local response normalization.
# pool1
pool1 = tf.nn.max_pool(conv1,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool1')
print_activations(pool1)
# conv2
with tf.name_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv2)
# pool2
pool2 = tf.nn.max_pool(conv2,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool2')
print_activations(pool2)
# conv3
with tf.name_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv3)
# conv4
with tf.name_scope('conv4') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv4 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv4)
# conv5
with tf.name_scope('conv5') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.nn.bias_add(conv, biases)
conv5 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
print_activations(conv5)
# pool5
pool5 = tf.nn.max_pool(conv5,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='VALID',
name='pool5')
print_activations(pool5)
return pool5, parameters
def time_tensorflow_run(session, target, info_string):
"""Run the computation to obtain the target tensor and print timing stats.
Args:
session: the TensorFlow session to run the computation under.
target: the target Tensor that is passed to the session's run() function.
info_string: a string summarizing this run, to be printed with the stats.
Returns:
None
"""
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in xrange(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target)
duration = time.time() - start_time
    if i >= num_steps_burn_in:
if not i % 10:
print ('%s: step %d, duration = %.3f' %
(datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
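  # Mean and standard deviation from the accumulated first and second moments:
  # Var(d) = E[d^2] - (E[d])^2.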
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
(datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def run_benchmark():
"""Run the benchmark on AlexNet."""
with tf.Graph().as_default():
# Generate some dummy images.
image_size = 224
    # Note that our padding definition is slightly different from cuda-convnet.
# In order to force the model to start with the same activations sizes,
# we add 3 to the image_size and employ VALID padding above.
images = tf.Variable(tf.random_normal([FLAGS.batch_size,
image_size,
image_size, 3],
dtype=tf.float32,
stddev=1e-1))
# Build a Graph that computes the logits predictions from the
# inference model.
pool5, parameters = inference(images)
# Build an initialization operation.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=config)
sess.run(init)
# Run the forward benchmark.
time_tensorflow_run(sess, pool5, "Forward")
# Add a simple objective so we can calculate the backward pass.
objective = tf.nn.l2_loss(pool5)
# Compute the gradient with respect to all the parameters.
grad = tf.gradients(objective, parameters)
# Run the backward benchmark.
time_tensorflow_run(sess, grad, "Forward-backward")
def main(_):
run_benchmark()
if __name__ == '__main__':
tf.app.run()
|
|
# coding=utf-8
# Copyright 2015 Foursquare Labs Inc. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import shutil
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.address import Address
from pants.build_graph.resources import Resources
from pants.option.custom_types import target_list_option, target_option
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from fsqio.pants.spindle.targets.spindle_thrift_library import SpindleThriftLibrary
from fsqio.pants.spindle.tasks.spindle_task import SpindleTask
NAMESPACE_PARSER = re.compile(r'^\s*namespace\s+([^\s]+)\s+([^\s]+)\s*$')
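# e.g. the thrift line 'namespace java com.foursquare.gen' yields groups ('java', 'com.foursquare.gen').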
class SpindleGen(SpindleTask, SimpleCodegenTask):
"""Generate codegen for spindle libraries."""
  # In order to ensure that any codegen generated by Spindle respects potential changes to the Spindle source code,
  # the SpindleGen task invokes a binary that is bootstrapped by a shelled pants run that is injected into the
  # task graph.
  # Practically, this means that if changes to the Spindle binary's source code are detected, a task is run
  # at the beginning of the execution that invokes pants in a subshell and compiles the binary. That SpindleBinary
  # is then used here during codegen, hopefully keeping day-to-day work on Spindle itself semi-reasonable.
@classmethod
def register_options(cls, register):
super(SpindleGen, cls).register_options(register)
register(
'--runtime-dependency',
advanced=True,
fingerprint=True,
type=target_list_option,
help='A list of targets that all spindle codegen depends on at runtime.',
)
register(
'--scala-ssp-template',
fingerprint=True,
advanced=True,
type=target_option,
help='Use this target as the scala templates for spindle codegen (required to be 1 target).',
)
register(
'--write-annotations-json',
fingerprint=True,
advanced=True,
type=bool,
default=False,
help='output *.annotations.json files for runtime class lists without reflection',
)
@classmethod
def implementation_version(cls):
return super(SpindleGen, cls).implementation_version() + [('SpindleGen', 2)]
@classmethod
def prepare(cls, options, round_manager):
super(SpindleGen, cls).prepare(options, round_manager)
round_manager.require('spindle_binary')
@staticmethod
def namespace_out(cache_dir):
return os.path.join(cache_dir, 'scala_record')
@staticmethod
def scalate_workdir(cache_dir):
return os.path.join(cache_dir, 'scalate')
@memoized_property
def scala_template(self):
return self.get_ssp_templates(
self.context.build_graph.get_target_from_spec(self.get_options().scala_ssp_template)
)
@memoized_property
def _jvm_options(self):
return self.get_options().jvm_options
@memoized_property
def _annotations(self):
return self.get_options().write_annotations_json
@memoized_property
def spindle_binary(self):
spindle_products = self.context.products.get('spindle_binary')
products = spindle_products.get(self.spindle_target)
if products:
for directory, product in products.items():
for filename in product:
binary = os.path.join(directory, filename)
if len(product) == 1 and os.path.isfile(binary):
return binary
    # Reaching here means the spindle_binary product did not contain exactly one binary file.
    raise TaskError(
      'Spindle requires a single snapshot of Spindle runtime source code in order to bootstrap.\n'
      'Found: {}\n'.format(products)
    )
@memoized_property
def _resolved_runtime_deps(self):
"""Returns a twitter.common.collections.orderedset.OrderedSet."""
# Cache resolution of runtime deps, since all spindle targets share them.
return self.resolve_deps(self.get_options().runtime_dependency)
def synthetic_target_extra_dependencies(self, target, target_workdir):
"""Bundle in spindle common and, optionally, json annotations."""
if self._annotations:
json_files = [path for path in os.listdir(target_workdir) if path.endswith('.json')]
if json_files:
return self._resolved_runtime_deps | \
[self.make_json_resource(os.path.relpath(target_workdir, get_buildroot()), json_files)]
return self._resolved_runtime_deps
@staticmethod
def synthetic_target_type(target):
return ScalaLibrary
@staticmethod
def is_gentarget(target):
return isinstance(target, SpindleThriftLibrary)
def execute(self):
# Spindle has two different outputs:
# 1) an intermediate cache of compiled templates in the scalate_workdir
# 2) its primary output, generated scala and java files.
#
# A regular scala_library target that depends on a Spindle target only needs the generated source files, (2).
# This is tricky to map, because Spindle, like regular Thrift, supports includes. Operating over a target with
# includes will generate source files for both the primary target as well as every 'included' target.
# So invoking spindle on a per-target basis can result in the same source files being generated dozens of times.
# That also includes regenerating the intermediate cache in the scalate_workdir, and is all extraordinarily slow.
#
    # We get around this here by using a heuristic to map just the source files generated by the target. This is
    # much safer than the original algorithm, which essentially ran spindle once, in a reused output dir that had no
    # concept of namespacing or caching and disbursed the files to downstream targets from there.
    # This temporary directory serves as a stable workspace and cache for the entire task. The output is
    # copied to the vt.results_dir on a per-target basis and treated like a regular Pants artifact from there.
with self.invalidated(
self.codegen_targets(),
invalidate_dependents=True,
fingerprint_strategy=self.get_fingerprint_strategy(),
) as invalidation_check:
with self.context.new_workunit(name='execute', labels=[WorkUnitLabel.MULTITOOL]):
with temporary_dir() as workdir:
for vt in invalidation_check.all_vts:
if not vt.valid:
if self._do_validate_sources_present(vt.target):
self.execute_codegen(vt.target, vt.results_dir, workdir)
self._handle_duplicate_sources(vt.target, vt.results_dir)
# TODO(awinter): mv next few lines to self.custom_copy(target) and then use
# inherited SimpleCodegenTask.execute.
ns_out = self.namespace_out(workdir)
self.cache_generated_files(
self.calculate_generated_sources(vt.target, ns_out), ns_out, vt.results_dir,
)
vt.update()
self._inject_synthetic_target(vt.target, vt.results_dir, vt.cache_key)
def make_json_resource(self, dirname, sources):
"""Return synthetic Resources target that provides the json annotations for a generated ScalaLibrary"""
if os.path.isabs(dirname) or any(os.path.isabs(s) for s in sources):
raise TaskError("Abs paths here will cause cross-machine build invalidation")
return self.context.add_new_target(
address=Address(dirname, 'json'),
target_type=Resources,
sources=sorted(sources),
)
  # Passing the intermediate 'workdir' here is a break with the upstream API. But the performance hit of regenerating
  # the scalate workdir and every dependent spindle target was more than I could accept, especially when we are
  # able to get the correctness fixes we need from the 'calculate_generated_sources' hackery.
def execute_codegen(self, target, target_workdir, workdir):
tool_args = [
'--template', self.scala_template,
'--namespace_out', self.namespace_out(workdir),
'--working_dir', self.scalate_workdir(workdir),
]
bases = {tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt)}
tool_args.extend(['--thrift_include', ':'.join(bases)])
if self._annotations:
tool_args.extend(['--write_annotations_json', 'true'])
args = tool_args + target.sources_relative_to_buildroot()
self.context.log.debug('Executing: {} {}\n'.format(self.spindle_target.main, ' '.join(args)))
result = self.runjava(
classpath=[self.spindle_binary],
jvm_options=self._jvm_options,
main=self.spindle_target.main,
args=args,
workunit_name='spindle-codegen',
)
if result != 0:
raise TaskError('Spindle codegen exited non-zero ({})'.format(result))
def cache_generated_files(self, generated_files, src, dst):
"""Copy a list of paths between directory roots, creating subdirs as needed.
Note: this isn't the same as artifact caching.
"""
    self.context.log.debug('Copying the following files to {}:\n  {}'.format(dst, '  '.join(generated_files)))
for gen_file in generated_files:
safe_mkdir(os.path.join(dst, os.path.dirname(gen_file)))
new_path = os.path.join(dst, gen_file)
old_path = os.path.join(src, gen_file)
shutil.copy2(old_path, new_path)
def calculate_generated_sources(self, target, ns_out=None):
generated_scala_sources = [
'{0}.{1}'.format(source, 'scala')
for source in self.sources_generated_by_target(target)
]
# generate json.
if ns_out is not None:
gen_json = [
'.'.join(source.split('/')) + '.json'
for source in self.sources_generated_by_target(target)
]
# note: json only created when annotations are nonempty, hence the need to check presence
      actual_json = [
        f for f in gen_json
        if os.path.exists(os.path.join(ns_out, f))
      ]
else:
# note: this case is for SpindleStubsGen
actual_json = []
return generated_scala_sources + actual_json
def sources_generated_by_target(self, target):
return [
relative_genned_source
for thrift_source in target.sources_relative_to_buildroot()
for relative_genned_source in self.calculate_genfiles(thrift_source)
]
# Hacky way to figure out which files get generated from a particular thrift source.
# TODO: This could be emitted by the codegen tool.
# That would also allow us to easily support 1:many codegen.
@staticmethod
def calculate_genfiles(source, lang=None):
gen_lang = lang if lang else 'java'
abs_source = os.path.join(get_buildroot(), source)
with open(abs_source, 'r') as thrift:
lines = thrift.readlines()
namespaces = {}
for line in lines:
match = NAMESPACE_PARSER.match(line)
if match and match.group(1) == gen_lang:
namespace = match.group(2)
namespaces[gen_lang] = namespace
# After we find gen_lang namespace we can stop reading the file.
break
def calculate_scala_record_genfiles(namespace, source):
"""Returns the generated file basenames, add the file extension to get the full path."""
basepath = namespace.replace('.', '/')
name = os.path.splitext(os.path.basename(source))[0]
return [os.path.join(basepath, name)]
namespace = namespaces.get(gen_lang)
if not namespace:
raise TaskError('No namespace provided in source: {}'.format(abs_source))
return calculate_scala_record_genfiles(namespace, abs_source)
|
|
"""
Django settings for pinecast project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import logging
import os
import sys
import mimetypes
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET', 'p)r2w-c!m^znb%2ppj0rxp9uu$+$q928w#*$41y5(eu$friqqv')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'True') == 'True'
DEBUG_TOOLBAR = os.environ.get('DEBUG_TOOLBAR', 'False') == 'True'
STAGING = os.environ.get('STAGING') == 'True'
TESTING = len(sys.argv) > 1 and sys.argv[1] == 'test'
if DEBUG:
ALLOWED_HOSTS = ['*']
else:
ALLOWED_HOSTS = ['pinecast.herokuapp.com', 'pinecast.com', 'pinecast.co', '.pinecast.co', 'tips.pinecast.com']
if STAGING:
ALLOWED_HOSTS.append('pinecast-staging.herokuapp.com')
ALLOWED_HOSTS.append('next.pinecast.com')
ALLOWED_HOSTS.append('tips.next.pinecast.com')
if os.environ.get('ADMIN_IP'):
INTERNAL_IPS = [os.environ.get('ADMIN_IP')]
else:
INTERNAL_IPS = []
if DEBUG_TOOLBAR:
print('Loading Django Debug Toolbar')
mimetypes.add_type("image/svg+xml", ".svg", True)
DATA_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024 * 5 # Required for RSS feed submission on import
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'accounts',
'assets',
'analytics',
'dashboard',
'feedback',
'notifications',
'payments',
'podcasts',
'sites',
)
MIDDLEWARE_CLASSES = (
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'payments.middleware.tips_site.TipsSubdomainMiddleware',
'sites.middleware.subdomains.SubdomainMiddleware',
'pinecast.middleware.perf.PerfMiddleware',
'pinecast.middleware.hnredirect.HostnameRedirect',
'pinecast.middleware.tsredirect.TrailingSlashRedirect',
)
ROOT_URLCONF = 'pinecast.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'DIRS': [
os.path.join(BASE_DIR, 'templates', 'jinja2'),
],
'OPTIONS': {
'environment': 'pinecast.jinja2_helper.environment',
},
},
# This is needed for the admin app
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
SILENCED_SYSTEM_CHECKS = ['urls.W002']
WSGI_APPLICATION = 'pinecast.wsgi.application'
ADMINS = [
('basta', 'mattbasta@gmail.com'),
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'propagate': True,
},
},
}
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {}
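# dj_database_url.config() reads the DATABASE_URL environment variable by default,
# e.g. 'postgres://user:password@host:5432/dbname'; when it is unset the assert below
# fails and we fall back to the local SQLite database.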
try:
import dj_database_url
prod_db = dj_database_url.config()
assert prod_db, 'No DB config found...'
print('Using prod database')
DATABASES['default'] = prod_db
DATABASES['default']['CONN_MAX_AGE'] = 500
except Exception:
print('Using SQLite db')
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'OPTIONS': {'timeout': 5},
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_THOUSAND_SEPARATOR = True
USE_TZ = False
def show_debug_toolbar(req):
if req.is_ajax():
return False
return (
req.META.get('REMOTE_ADDR') in INTERNAL_IPS or
req.META.get('HTTP_CF_CONNECTING_IP') in INTERNAL_IPS or
not INTERNAL_IPS
)
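# e.g. with INTERNAL_IPS = ['1.2.3.4'], only requests from that address (directly or via
# Cloudflare's CF-Connecting-IP header) get the toolbar; with no INTERNAL_IPS configured,
# every non-AJAX request does.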
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': show_debug_toolbar,
}
TS_OMNIBUS = {
'hostname': os.environ.get('TS_HOSTNAME', 'pinecast-js.s3.amazonaws.com'),
'version': os.environ.get('TS_VERSION', '1'),
}
SITE_BUILDER = {
'hostname': os.environ.get('SB_HOSTNAME', 'pinecast-js.s3.amazonaws.com'),
'version': os.environ.get('SB_VERSION', '1'),
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = STATIC_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = STATIC_DIRS[0] + 'root'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
S3_BUCKET = os.environ.get('S3_BUCKET')
S3_LOGS_BUCKET = os.environ.get('S3_LOGS_BUCKET')
S3_ACCESS_ID = os.environ.get('S3_ACCESS_ID')
S3_SECRET_KEY = os.environ.get('S3_SECRET_KEY')
SES_ACCESS_ID = os.environ.get('SES_ACCESS_ID')
SES_SECRET_KEY = os.environ.get('SES_SECRET_KEY')
KINESIS_ACCESS_ID = os.environ.get('KINESIS_ACCESS_ID')
KINESIS_SECRET_KEY = os.environ.get('KINESIS_SECRET_KEY')
UPLOAD_HOST = 'd34bo1v665hk6e.cloudfront.net'
RECAPTCHA_KEY = os.environ.get('RECAPTCHA_KEY')
RECAPTCHA_SECRET = os.environ.get('RECAPTCHA_SECRET')
STRIPE_API_KEY = os.environ.get('STRIPE_API_KEY')
STRIPE_PUBLISHABLE_KEY = os.environ.get('STRIPE_PUBLISHABLE_KEY')
LAMBDA_ACCESS_SECRET = os.environ.get('LAMBDA_ACCESS_SECRET')
RSS_FETCH_ENDPOINT = os.environ.get('RSS_FETCH_ENDPOINT')
DEPLOY_SLACKBOT_URL = os.environ.get('DEPLOY_SLACKBOT_URL')
INTERCOM_SECRET = os.environ.get('INTERCOM_SECRET')
INTERCOM_ACCESS_TOKEN = os.environ.get('INTERCOM_ACCESS_TOKEN')
SPOTIFY_TOKEN = os.environ.get('SPOTIFY_TOKEN')
MAX_FILE_SIZE = 1024 * 1024 * 256
EMAIL_CONFIRMATION_MAX_AGE = 3600 * 24 * 2 # Two days
SUPPORT_URL = 'https://help.pinecast.com'
SUPPORT_EMAIL = 'support@pinecast.zendesk.com'
SENDER_EMAIL = 'Matt@pinecast.com'
ANALYTICS_PROVIDER = os.environ.get('ANALYTICS_PROVIDER', 'apg')
APG_CONNSTRING_READ = os.environ.get('APG_CONNSTRING_READ')
APG_CONNSTRING_WRITE = os.environ.get('APG_CONNSTRING_WRITE')
APG_CONN_POOL_SIZE = int(os.environ.get('APG_CONN_POOL_SIZE', '20'))
APG_DB_SUBSCRIPTION = os.environ.get('APG_DB_SUBSCRIPTION', 'subscriptions.subscriptions')
APG_DB_LISTEN = os.environ.get('APG_DB_LISTEN', 'listens.listens')
APG_CONDITION_OVERRIDES = {}
FORCE_EPISODE_SPARKLINE_OVERRIDE = False
CHALLENGE_URL = os.environ.get('CHALLENGE_URL')
CHALLENGE_RESPONSE = os.environ.get('CHALLENGE_RESPONSE')
REFERRAL_DISCOUNT = 40 # percent off
REFERRAL_DISCOUNT_DURATION = 4 # months
NPLUSONE_LOGGER = logging.getLogger('nplusone')
NPLUSONE_LOG_LEVEL = logging.ERROR
DISABLE_CONCURRENCY = os.environ.get('DISABLE_CONCURRENCY') == 'True'
FEED_GZIP = os.environ.get('FEED_GZIP') == 'True'
FEED_STREAMING = os.environ.get('FEED_STREAMING') == 'True'
try:
from .settings_local import *
except ImportError:
pass
ROLLBAR = {
'access_token': os.environ.get('ROLLBAR_ACCESS_TOKEN'),
'environment': 'development' if DEBUG else ('staging' if STAGING else 'production'),
'exception_level_filters': [
('django.http.Http404', 'ignored'),
],
'branch': 'master',
'root': os.getcwd(),
'capture_email': True,
}
ROLLBAR_POST_CLIENT_ITEM = os.environ.get('ROLLBAR_POST_CLIENT_ITEM')
if DEBUG:
INSTALLED_APPS = INSTALLED_APPS + ('django_nose', 'nplusone.ext.django', )
MIDDLEWARE_CLASSES = ('nplusone.ext.django.NPlusOneMiddleware', ) + MIDDLEWARE_CLASSES
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
if DEBUG_TOOLBAR:
INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', )
MIDDLEWARE_CLASSES = ('debug_toolbar.middleware.DebugToolbarMiddleware', ) + MIDDLEWARE_CLASSES
if DISABLE_CONCURRENCY:
DEBUG_TOOLBAR_PANELS = (
'djdt_flamegraph.FlamegraphPanel',
)
if not DEBUG:
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + ('rollbar.contrib.django.middleware.RollbarNotifierMiddleware', )
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2013 Sebastien Helleu <flashcode@flashtux.org>
# Copyright (C) 2011 xt <xt@bash.no>
# Copyright (C) 2012 Filip H.F. "FiXato" Slagter <fixato+weechat+urlserver@gmail.com>
# Copyright (C) 2012 WillyKaze <willykaze@willykaze.org>
# Copyright (C) 2013 Thomas Kindler <mail_weechat@t-kindler.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Shorten URLs with own HTTP server.
# (this script requires Python >= 2.6)
#
# How does it work?
#
# 1. The URLs displayed in buffers are shortened and stored in memory (saved in
#    a file when the script is unloaded).
# 2. Shortened URLs can be displayed below messages, in a dedicated buffer, or
#    as an HTML page in your browser.
# 3. This script embeds an HTTP server, which redirects shortened URLs
#    to the real URL and displays the list of all URLs if you browse the address without a URL key.
# 4. It is recommended to customize/protect the HTTP server using the script options
#    (see /help urlserver).
#
# Example after message:
#
# FlashCode | look at this: http://test.server.com/this-is-a-long-url
# | [ http://myhost.org:1234/8aK ]
#
# Example inside message:
#
# FlashCode | look at this: http://test.server.com/this-is-a-long-url [ http://myhost.org:1234/8aK ]
#
# List of URLs:
# - in WeeChat: /urlserver
# - in browser: http://myhost.org:1234/
#
# History:
#
# 2013-05-04, Thomas Kindler <mail_weechat@t-kindler.de>
# version 1.2: added a "http_scheme_display" option. This makes it possible to run
# the server behind a reverse proxy with https:// URLs.
# 2013-03-25, Hermit (@irc.freenode.net):
# version 1.1: made links relative in the html, so that they can be followed when accessing
# the listing remotely using the weechat box's IP directly.
# 2012-12-12, WillyKaze <willykaze@willykaze.org>:
# version 1.0: add options "http_time_format", "display_msg_in_url" (works with relay/irc),
# "color_in_msg", "separators"
# 2012-04-18, Filip H.F. "FiXato" Slagter <fixato+weechat+urlserver@gmail.com>:
# version 0.9: add options "http_autostart", "http_port_display"
# "url_min_length" can now be set to -1 to auto-detect minimal url length
# Also, if port is 80 now, :80 will no longer be added to the shortened url.
# 2012-04-17, Filip H.F. "FiXato" Slagter <fixato+weechat+urlserver@gmail.com>:
# version 0.8: add more CSS support by adding options "http_fg_color", "http_css_url",
# and "http_title", add descriptive classes to most html elements.
# See https://raw.github.com/FiXato/weechat_scripts/master/urlserver/sample.css
# for a sample css file that can be used for http_css_url
# 2012-04-11, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.7: fix truncated HTML page (thanks to xt), fix base64 decoding with Python 3.x
# 2012-01-19, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.6: add option "http_hostname_display"
# 2012-01-03, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.5: make script compatible with Python 3.x
# 2011-10-31, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.4: add options "http_embed_youtube_size" and "http_bg_color",
# add extensions jpeg/bmp/svg for embedded images
# 2011-10-30, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.3: escape HTML chars for page with list of URLs, add option
# "http_prefix_suffix", disable highlights on urlserver buffer
# 2011-10-30, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.2: fix error on loading of file "urlserver_list.txt" when it is empty
# 2011-10-30, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.1: initial release
#
SCRIPT_NAME = 'urlserver'
SCRIPT_AUTHOR = 'Sebastien Helleu <flashcode@flashtux.org>'
SCRIPT_VERSION = '1.2'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'Shorten URLs with own HTTP server'
SCRIPT_COMMAND = 'urlserver'
SCRIPT_BUFFER = 'urlserver'
import_ok = True
try:
import weechat
except ImportError:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: http://www.weechat.org/')
import_ok = False
try:
import sys, os, string, ast, datetime, socket, re, base64, cgi
except ImportError as message:
print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))
import_ok = False
# regex are from urlbar.py, written by xt
url_octet = r'(?:2(?:[0-4]\d|5[0-5])|1\d\d|\d{1,2})'
url_ipaddr = r'%s(?:\.%s){3}' % (url_octet, url_octet)
url_label = r'[0-9a-z][-0-9a-z]*[0-9a-z]?'
url_domain = r'%s(?:\.%s)*\.[a-z][-0-9a-z]*[a-z]?' % (url_label, url_label)
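# e.g. the full regex below matches 'http://example.com:8080/path' and 'ftp://192.168.0.1/file'.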
urlserver = {
'socket' : None,
'hook_fd' : None,
'regex' : re.compile(r'(\w+://(?:%s|%s)(?::\d+)?(?:/[^\])>\s]*)?)' % (url_domain, url_ipaddr), re.IGNORECASE),
'urls' : {},
'number' : 0,
'buffer' : '',
}
# script options
urlserver_settings_default = {
# HTTP server settings
    'http_autostart'     : ('on', 'start the built-in HTTP server automatically'),
'http_scheme_display': ('http', 'display this scheme in shortened URLs'),
'http_hostname' : ('localhost', 'force hostname/IP in bind of socket (empty value = auto-detect current hostname)'),
'http_hostname_display': ('', 'display this hostname in shortened URLs'),
'http_port' : ('1234', 'force port for listening (empty value = find a random free port)'),
'http_port_display' : ('', 'display this port in shortened URLs. Useful if you forward a different external port to the internal port'),
'http_allowed_ips' : ('', 'regex for IPs allowed to use server (example: "^(123.45.67.89|192.160.*)$")'),
'http_auth' : ('', 'login and password (format: "login:password") required to access to page with list of URLs'),
'http_url_prefix' : ('', 'prefix to add in URLs to prevent external people to scan your URLs (for example: prefix "xx" will give URL: http://host.com:1234/xx/8)'),
'http_bg_color' : ('#f4f4f4', 'background color for HTML page'),
'http_fg_color' : ('#000', 'foreground color for HTML page'),
'http_css_url' : ('', 'URL of external Cascading Style Sheet to add (BE CAREFUL: the HTTP referer will be sent to site hosting CSS file!) (empty value = use default embedded CSS)'),
'http_embed_image' : ('off', 'embed images in HTML page (BE CAREFUL: the HTTP referer will be sent to site hosting image!)'),
'http_embed_youtube' : ('off', 'embed youtube videos in HTML page (BE CAREFUL: the HTTP referer will be sent to youtube!)'),
'http_embed_youtube_size': ('480*350', 'size for embedded youtube video, format is "xxx*yyy"'),
'http_prefix_suffix' : (' ', 'suffix displayed between prefix and message in HTML page'),
'http_title' : ('WeeChat URLs', 'title of the HTML page'),
'http_time_format' : ('%d/%m/%y %H:%M:%S', 'time format in the HTML page'),
# message filter settings
'msg_ignore_buffers' : ('perl.chanmon,irc.etsy.#chef', 'comma-separated list (without spaces) of buffers to ignore (full name like "irc.freenode.#weechat")'),
'msg_ignore_tags' : ('irc_quit,irc_part,notify_none', 'comma-separated list (without spaces) of tags (or beginning of tags) to ignore (for example, use "notify_none" to ignore self messages or "nick_weebot" to ignore messages from nick "weebot")'),
'msg_require_tags' : ('nick_', 'comma-separated list (without spaces) of tags (or beginning of tags) required to shorten URLs (for example "nick_" to shorten URLs only in messages from other users)'),
'msg_ignore_regex' : ('', 'ignore messages matching this regex'),
    'msg_ignore_dup_urls': ('off', 'ignore duplicated URLs (do not add a URL to the list if it is already there)'),
# display settings
'color' : ('darkgray', 'color for urls displayed after message'),
'color_in_msg' : ('', 'color for urls displayed inside irc message: it is a number (irc color) between 00 and 15 (see doc for a list of irc colors)'),
'separators' : ('[|]', 'separators for short url list (string with exactly 3 chars)'),
'display_urls' : ('on', 'display URLs below messages'),
'display_urls_in_msg': ('off', 'add shorten url next to the original url (only in IRC messages) (useful for urlserver behind relay/irc)'),
    'url_min_length'     : ('0', 'minimum length for a URL to be shortened (0 = shorten all URLs, -1 = detect minimal length based on the shortened URL)'),
'urls_amount' : ('100', 'number of URLs to keep in memory (and in file when script is not loaded)'),
'buffer_short_name' : ('off', 'use buffer short name on dedicated buffer'),
'debug' : ('off', 'print some debug messages'),
}
urlserver_settings = {}
def base62_encode(number):
"""Encode a number in base62 (all digits + a-z + A-Z)."""
base62chars = string.digits + string.ascii_letters
l = []
while number > 0:
remainder = number % 62
number = number // 62
l.insert(0, base62chars[remainder])
return ''.join(l) or '0'
def base62_decode(str_value):
"""Decode a base62 string (all digits + a-z + A-Z) to a number."""
base62chars = string.digits + string.ascii_letters
return sum([base62chars.index(char) * (62 ** (len(str_value) - index - 1)) for index, char in enumerate(str_value)])
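# Quick sanity check: base62_encode(125) == '21' (2*62 + 1) and base62_decode('21') == 125.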
def base64_decode(s):
if sys.version_info >= (3,):
# python 3.x
return base64.b64decode(s.encode('utf-8'))
else:
# python 2.x
return base64.b64decode(s)
def urlserver_get_hostname(full=True):
"""Return hostname with port number if != default port for the protocol."""
global urlserver_settings
scheme = urlserver_settings['http_scheme_display']
hostname = urlserver_settings['http_hostname_display'] or urlserver_settings['http_hostname'] or socket.getfqdn()
# If the built-in HTTP server isn't running, default to port from settings
port = urlserver_settings['http_port']
if len(urlserver_settings['http_port_display']) > 0:
port = urlserver_settings['http_port_display']
elif urlserver['socket']:
port = urlserver['socket'].getsockname()[1]
# Don't add :port if the port matches the default port for the protocol
prefixed_port = ':%s' % port
if scheme == "http" and prefixed_port == ':80':
prefixed_port = ''
elif scheme == "https" and prefixed_port == ':443':
prefixed_port = ''
prefix = ''
if urlserver_settings['http_url_prefix']:
prefix = '%s/' % urlserver_settings['http_url_prefix']
if full:
return '%s://%s%s/%s' % (scheme, hostname, prefixed_port, prefix)
else:
return '/%s' % prefix
def urlserver_short_url(number, full=True):
"""Return short URL with number."""
return '%s%s' % (urlserver_get_hostname(full), base62_encode(number))
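# e.g. urlserver_short_url(500) with scheme 'http', host 'myhost.org', port 1234 and no
# prefix returns 'http://myhost.org:1234/84', since base62_encode(500) == '84'.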
def urlserver_server_reply(conn, code, extra_header, message, mimetype='text/html'):
"""Send a HTTP reply to client."""
global urlserver_settings
if extra_header:
extra_header += '\r\n'
s = 'HTTP/1.1 %s\r\n' \
'%s' \
'Content-Type: %s\r\n' \
'Content-Length: %d\r\n' \
'\r\n' \
% (code, extra_header, mimetype, len(message))
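    # e.g. for code '200 OK' with no extra header this yields:
    #   'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nContent-Length: 123\r\n\r\n'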
msg = None
if sys.version_info >= (3,):
# python 3.x
if type(message) is bytes:
msg = s.encode('utf-8') + message
else:
msg = s.encode('utf-8') + message.encode('utf-8')
else:
# python 2.x
msg = s + message
if urlserver_settings['debug'] == 'on':
weechat.prnt('', 'urlserver: sending %d bytes' % len(msg))
conn.sendall(msg)
def urlserver_server_favicon():
"""Return favicon for HTML page."""
s = 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wgcDwo4MOEy+AAAAB10' \
'RVh0Q29tbWVudABDcmVhdGVkIHdpdGggVGhlIEdJTVDvZCVuAAADp0lEQVQ4y12TXUxbBRTH/733ttx+XFq+Ci2Ur8AEYSBIosnS+YJhauJY9mQyXfTBh8UEnclm1Gxq' \
'YoJGfZkzqAkmQ2IicVOcwY1YyMZwWcfXVq2Ulpax3raX2y9ob29v7+31QViIJzkPJyfn//D7/48G+2r+6jzaO9oHFCjPCTkBM7MzF06eOhng/uHMFE2dUopKExthb3cf' \
'6h4DUAAAar+A3WB/b21l7a3pW9NLPW099Vk+axn9aPRDkROvX3FdiRtpo9LX0XeMJMm/FUW5DQDE3vHSN0t9vhXfy+eGz00hjgk6TZfcWXajlq79ePLyZGLog6Gvm7XN' \
'nPumO+50HnYAIB8J+P3cMzmL+oVAy1ZdRhdykA4bp6YT5z/79PjaVtDJ+ThxeHCYSOUzWn17eebs2fMvAWgBoCEAIBTiS1cDG81b8azZz/rrT4+f/qWm92D2wUY6H91O' \
'VfFhvnFkZiQRKRWnNzfj4fn5RSOA0kcM4nwhHRckQRLFwoBx4Ljd3pD3eoKNNkeDoaDTSzIvM2cqz7zLJKsVylphG//ynd8B8ABUCgC8Xn+oxt5W7V2aS99JuANP9th6' \
'9RtsUlbNjNZkk+5tTwRjmXisK1t5gJR1qsfjDu4K/Mdg7PtPuFSKzEWS6Xi6QTdlau9ZD4Y22EgkI7KxVEZqrVgwveC8nJX08vLKQhSAB4CAPZLJZLjY2fnqYCLBV6ga' \
'ISuROdP9xRthhnkMeVGtIQijXGo+0JTZEYzXrg8vdB0u8/Yeakj57sWEPRvVCLuzIgg0XdW3fSySdHX4A7N3+a3sH2LOQlNka1ssmmwKP2T1BJkKOJ5ST/hXN50ACAIA' \
'Pv+2W0+UT2WFrNFkLsmVRP0PxaefNehl5b5nZ8di0OjUGp3d5eCE0fX+18nauh7u+a4jhVcAmKnvrtnrWTY6bKwcrxILUtHe5CVaWtPKE/3ki8s3Lk1aLIiq8NrrD/os' \
'ZfZIvdVBqWSKkoNzSgYAQ5gZ4bXNQNw0cZF/P8r6fq4zJ9ORkDTXXCdrkNZo+49eon8d41apbYGjZTVlJSmfSdKE3a7cVwBYqopWDEecupYTg+TQny53uK6qkPL8Jcw+' \
'3sh0LjbL1jZbkbwwEtgmCW2C47X5GhOhXw9oWABhADL12w/qxSIpEz/9mI9JucIsw6hzxaK6tBMyVE9dTWbKrMqb01OoUyXdrQfhAvP2G3S5y1W4CyC5/xF1u63Zy0Z1' \
'mZ7ejSv5v50OQMnujH8BbzDFpcdRAIIAAAAASUVORK5CYII='
return base64_decode(s)
def urlserver_server_reply_list(conn, sort='-time'):
"""Send list of URLs as HTML page to client."""
global urlserver, urlserver_settings
content = '<div class="urls">\n<table id="urls_table">\n'
if not sort.startswith('-'):
sort = '+%s' % sort
if sort[1:] == 'time':
urls = sorted(urlserver['urls'].items())
else:
idx = ['time', 'nick', 'buffer'].index(sort[1:])
urls = sorted(urlserver['urls'].items(), key=lambda url: url[1][idx].lower())
if sort.startswith('-'):
urls.reverse()
sortkey = { '-': ('', '↑'), '+': ('-', '↓') }
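    # maps the current sort direction to (prefix for the link that toggles the order,
    # arrow glyph displayed next to the sorted column)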
prefix = ''
if urlserver_settings['http_url_prefix']:
prefix = '%s/' % urlserver_settings['http_url_prefix']
content += ' <tr>'
for column, defaultsort in (('time', '-'), ('nick', ''), ('buffer', '')):
if sort[1:] == column:
content += '<th class="sortable sorted_by %s_header"><a href="/%ssort=%s%s">%s</a> %s</th>' % (column, prefix, sortkey[sort[0]][0], column, column.capitalize(), sortkey[sort[0]][1])
else:
content += '<th class="sortable %s_header"><a class="sort_link" href="/%ssort=%s%s">%s</a></th>' % (column, prefix, defaultsort, column, column.capitalize())
content += '<th class="unsortable message_header">URLs</th>'
content += '</tr>\n'
for key, item in urls:
content += ' <tr>'
url = item[3]
obj = ''
message = cgi.escape(item[4].replace(url, '\x01\x02\x03\x04')).split('\t', 1)
message[0] = '<span class="prefix">%s</span>' % message[0]
message[1] = '<span class="message">%s</span>' % message[1]
strjoin = '<span class="prefix_suffix"> %s </span>' % urlserver_settings['http_prefix_suffix'].replace(' ', ' ')
message = strjoin.join(message).replace('\x01\x02\x03\x04', '</span><a class="url" href="%s" title="%s">%s</a><span class="message">' % (urlserver_short_url(key, False), url, url))
if urlserver_settings['http_embed_image'] == 'on' and url.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.svg')):
obj = '<div class="obj"><img src="%s" title="%s" alt="%s"></div>' % (url, url, url)
elif urlserver_settings['http_embed_youtube'] == 'on' and 'youtube.com/' in url:
m = re.search('v=([\w\d]+)', url)
if m:
yid = m.group(1)
try:
size = urlserver_settings['http_embed_youtube_size'].split('*')
width = int(size[0])
height = int(size[1])
except:
width = 480
height = 350
obj = '<div class="obj youtube"><iframe id="%s" type="text/html" width="%d" height="%d" ' \
'src="http://www.youtube.com/embed/%s?enablejsapi=1"></iframe></div>' % (yid, width, height, yid)
content += '<td class="timestamp">%s</td><td class="nick">%s</td><td class="buffer">%s</td><td class="message">' % (item[0], item[1], item[2])
content += '%s%s</td></tr>\n' % (message, obj)
content += '</table>'
if len(urlserver_settings['http_css_url']) > 0:
css = '<link rel="stylesheet" type="text/css" href="%s" />' % urlserver_settings['http_css_url']
else:
css = '<style type="text/css" media="screen">' \
'<!--\n' \
' html { font-family: Verdana, Arial, Helvetica; font-size: 12px; background: %s; color: %s }\n' \
' .urls table { border-collapse: collapse }\n' \
' .urls table td,th { border: solid 1px #cccccc; padding: 4px; font-size: 12px }\n' \
' .timestamp,.nick,.buffer { white-space: nowrap }\n' \
' .sorted_by { font-style: italic; }\n' \
' .obj { margin-top: 1em }\n' \
'-->' \
'</style>\n' % (urlserver_settings['http_bg_color'], urlserver_settings['http_fg_color'])
html = '<html>\n' \
'<head>\n' \
'<title>%s</title>\n' \
'<meta http-equiv="content-type" content="text/html; charset=utf-8" />\n' \
'%s\n' \
'</head>\n' \
'<body>\n%s\n</body>\n' \
'</html>' % (urlserver_settings['http_title'], css, content)
urlserver_server_reply(conn, '200 OK', '', html)
def urlserver_server_fd_cb(data, fd):
"""Callback for server socket."""
global urlserver, urlserver_settings
if not urlserver['socket']:
return weechat.WEECHAT_RC_OK
conn, addr = urlserver['socket'].accept()
if urlserver_settings['debug'] == 'on':
weechat.prnt('', 'urlserver: connection from %s' % str(addr))
if urlserver_settings['http_allowed_ips'] and not re.match(urlserver_settings['http_allowed_ips'], addr[0]):
if urlserver_settings['debug'] == 'on':
weechat.prnt('', 'urlserver: IP not allowed')
conn.close()
return weechat.WEECHAT_RC_OK
data = None
try:
conn.settimeout(0.3)
data = conn.recv(4096).decode('utf-8')
data = data.replace('\r\n', '\n')
except:
return weechat.WEECHAT_RC_OK
replysent = False
sort = '-time'
m = re.search('^GET /(.*) HTTP/.*$', data, re.MULTILINE)
if m:
url = m.group(1)
if urlserver_settings['debug'] == 'on':
weechat.prnt('', 'urlserver: %s' % m.group(0))
if 'favicon.' in url:
urlserver_server_reply(conn, '200 OK', '',
urlserver_server_favicon(), mimetype='image/x-icon')
replysent = True
else:
# check if prefix is ok (if prefix defined in settings)
prefixok = True
if urlserver_settings['http_url_prefix']:
if url.startswith(urlserver_settings['http_url_prefix']):
url = url[len(urlserver_settings['http_url_prefix']):]
if url.startswith('/'):
url = url[1:]
else:
prefixok = False
# prefix ok, go on with url
if prefixok:
if url.startswith('sort='):
# sort asked for list of urls
sort = url[5:]
url = ''
if url:
# short url, read base62 key and redirect to page
number = -1
try:
number = base62_decode(url)
except:
pass
if number >= 0 and number in urlserver['urls']:
# no redirection with "Location:" because it sends HTTP referer
#conn.sendall('HTTP/1.1 302 Found\nLocation: %s\n' % urlserver['urls'][number][2])
urlserver_server_reply(conn, '200 OK', '',
'<meta http-equiv="refresh" content="0; url=%s">' % urlserver['urls'][number][3])
replysent = True
else:
# page with list of urls
authok = True
if urlserver_settings['http_auth']:
auth = re.search('^Authorization: Basic (\S+)$', data, re.MULTILINE)
if not auth or base64_decode(auth.group(1)).decode('utf-8') != urlserver_settings['http_auth']:
authok = False
if authok:
urlserver_server_reply_list(conn, sort)
else:
urlserver_server_reply(conn, '401 Authorization required',
'WWW-Authenticate: Basic realm="%s"' % SCRIPT_NAME, '')
replysent = True
else:
if urlserver_settings['debug'] == 'on':
weechat.prnt('', 'urlserver: prefix missing')
if not replysent:
urlserver_server_reply(conn,
'404 Not found', '',
'<html>\n'
'<head><title>Page not found</title></head>\n'
'<body><h1>Page not found</h1></body>\n'
'</html>')
conn.close()
return weechat.WEECHAT_RC_OK
def urlserver_server_status():
"""Display status of server."""
global urlserver
if urlserver['socket']:
weechat.prnt('', 'URL server listening on %s' % str(urlserver['socket'].getsockname()))
else:
weechat.prnt('', 'URL server not running')
def urlserver_server_start():
"""Start mini HTTP server."""
global urlserver, urlserver_settings
if urlserver['socket']:
weechat.prnt('', 'URL server already running')
return
port = 0
try:
port = int(urlserver_settings['http_port'])
except:
port = 0
urlserver['socket'] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
urlserver['socket'].setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
urlserver['socket'].bind((urlserver_settings['http_hostname'] or socket.getfqdn(), port))
except Exception as e:
weechat.prnt('', '%sBind error: %s' % (weechat.prefix('error'), e))
urlserver['socket'] = None
urlserver_server_status()
return
urlserver['socket'].listen(5)
urlserver['hook_fd'] = weechat.hook_fd(urlserver['socket'].fileno(), 1, 0, 0, 'urlserver_server_fd_cb', '')
urlserver_server_status()
def urlserver_server_stop():
"""Stop mini HTTP server."""
global urlserver
if urlserver['socket'] or urlserver['hook_fd']:
if urlserver['socket']:
urlserver['socket'].close()
urlserver['socket'] = None
if urlserver['hook_fd']:
weechat.unhook(urlserver['hook_fd'])
urlserver['hook_fd'] = None
weechat.prnt('', 'URL server stopped')
def urlserver_server_restart():
"""Restart mini HTTP server."""
urlserver_server_stop()
urlserver_server_start()
def urlserver_display_url_detail(key, return_url=False):
global urlserver
url = urlserver['urls'][key]
nick = url[1]
if nick:
nick += ' @ '
if return_url:
return urlserver_short_url(key)
else:
weechat.prnt_date_tags(urlserver['buffer'], 0, 'notify_none',
'%s, %s%s%s%s: %s%s%s -> %s' % (url[0],
nick,
weechat.color('chat_buffer'),
url[2],
weechat.color('reset'),
weechat.color(urlserver_settings['color']),
urlserver_short_url(key),
weechat.color('reset'),
url[3]))
def urlserver_buffer_input_cb(data, buffer, input_data):
if input_data in ('q', 'Q'):
weechat.buffer_close(buffer)
return weechat.WEECHAT_RC_OK
def urlserver_buffer_close_cb(data, buffer):
global urlserver
urlserver['buffer'] = ''
return weechat.WEECHAT_RC_OK
def urlserver_open_buffer():
global urlserver, urlserver_settings
if not urlserver['buffer']:
urlserver['buffer'] = weechat.buffer_new(SCRIPT_BUFFER,
'urlserver_buffer_input_cb', '',
'urlserver_buffer_close_cb', '')
if urlserver['buffer']:
weechat.buffer_set(urlserver['buffer'], 'title', 'urlserver')
weechat.buffer_set(urlserver['buffer'], 'localvar_set_no_log', '1')
weechat.buffer_set(urlserver['buffer'], 'time_for_each_line', '0')
weechat.buffer_set(urlserver['buffer'], 'print_hooks_enabled', '0')
weechat.buffer_clear(urlserver['buffer'])
keys = sorted(urlserver['urls'])
for key in keys:
urlserver_display_url_detail(key)
weechat.buffer_set(urlserver['buffer'], 'display', '1')
def urlserver_cmd_cb(data, buffer, args):
"""The /urlserver command."""
global urlserver
if args == 'start':
urlserver_server_start()
elif args == 'restart':
urlserver_server_restart()
elif args == 'stop':
urlserver_server_stop()
elif args == 'status':
urlserver_server_status()
elif args == 'clear':
urlserver['urls'] = {}
urlserver['number'] = 0
weechat.prnt('', 'urlserver: list cleared')
else:
urlserver_open_buffer()
return weechat.WEECHAT_RC_OK
def urlserver_update_urllist(buffer_full_name, buffer_short_name, tags, prefix, message, nick=None):
"""Update urls list and return a list of short urls for message."""
global urlserver, urlserver_settings
# skip ignored buffers
if urlserver_settings['msg_ignore_buffers']:
if buffer_full_name in urlserver_settings['msg_ignore_buffers'].split(','):
return None
listtags = []
if tags:
listtags = tags.split(',')
# skip ignored tags
if urlserver_settings['msg_ignore_tags']:
for itag in urlserver_settings['msg_ignore_tags'].split(','):
for tag in listtags:
if tag.startswith(itag):
return None
# exit if a required tag is missing
if urlserver_settings['msg_require_tags']:
for rtag in urlserver_settings['msg_require_tags'].split(','):
tagfound = False
for tag in listtags:
if tag.startswith(rtag):
tagfound = True
break
if not tagfound:
return None
    # ignore message if it matches the "msg_ignore_regex"
if urlserver_settings['msg_ignore_regex']:
if re.search(urlserver_settings['msg_ignore_regex'], prefix + '\t' + message):
return None
# extract nick from tags
if not nick:
nick = ''
for tag in listtags:
if tag.startswith('nick_'):
nick = tag[5:]
break
# get URL min length
min_length = 0
try:
min_length = int(urlserver_settings['url_min_length'])
        # detect the minimum length based on the shortened URL length
if min_length == -1:
min_length = len(urlserver_short_url(urlserver['number'])) + 1
except:
min_length = 0
# shorten URL(s) in message
urls_short = []
for url in urlserver['regex'].findall(message):
if len(url) >= min_length:
if urlserver_settings['msg_ignore_dup_urls'] == 'on':
if [key for key, value in urlserver['urls'].items() if value[3] == url]:
continue
number = urlserver['number']
            if not url.startswith(urlserver_get_hostname()):  # don't save URLs that were already shortened
urlserver['urls'][number] = (datetime.datetime.now().strftime(urlserver_settings['http_time_format']), nick, buffer_short_name, url, '%s\t%s' % (prefix, message))
urls_short.append(urlserver_short_url(number))
if urlserver['buffer']:
urlserver_display_url_detail(number)
urlserver['number'] += 1
    # remove old URLs if we have reached the max list size
urls_amount = 50
try:
urls_amount = int(urlserver_settings['urls_amount'])
if urls_amount <= 0:
urls_amount = 50
except:
urls_amount = 50
while len(urlserver['urls']) > urls_amount:
keys = sorted(urlserver['urls'])
del urlserver['urls'][keys[0]]
return urls_short
def urlserver_print_cb(data, buffer, time, tags, displayed, highlight, prefix, message):
"""Callback for message printed in buffer: display short URLs after message."""
global urlserver, urlserver_settings
if urlserver_settings['display_urls'] == 'on':
buffer_full_name = '%s.%s' % (weechat.buffer_get_string(buffer, 'plugin'), weechat.buffer_get_string(buffer, 'name'))
if urlserver_settings['buffer_short_name'] == 'on':
buffer_short_name = weechat.buffer_get_string(buffer, 'short_name')
else:
buffer_short_name = buffer_full_name
urls_short = urlserver_update_urllist(buffer_full_name, buffer_short_name, tags, prefix, message)
if urls_short:
if urlserver_settings['separators'] and len(urlserver_settings['separators']) == 3:
separator = ' %s ' % (urlserver_settings['separators'][1])
urls_string = separator.join(urls_short)
urls_string = '%s %s %s' % (urlserver_settings['separators'][0], urls_string, urlserver_settings['separators'][2])
else:
urls_string = ' | '.join(urls_short)
urls_string = '[ ' + urls_string + ' ]'
weechat.prnt_date_tags(buffer, 0, 'no_log,notify_none', '%s%s' % (weechat.color(urlserver_settings['color']), urls_string))
return weechat.WEECHAT_RC_OK
def urlserver_modifier_irc_cb(data, modifier, modifier_data, string):
"""Modifier for IRC message: add short URLs at the end of IRC message."""
global urlserver, urlserver_settings
if urlserver_settings['display_urls_in_msg'] != 'on':
return string
msg = weechat.info_get_hashtable('irc_message_parse',
{ 'message': string,
'server': modifier_data })
if 'nick' not in msg or 'channel' not in msg or 'arguments' not in msg:
return string
try:
message = msg['arguments'].split(' ', 1)[1]
if message.startswith(':'):
message = message[1:]
except:
return string
if weechat.info_get('irc_is_channel', '%s,%s' % (modifier_data, msg['channel'])) == '1':
name = msg['channel']
else:
name = msg['nick']
buffer_full_name = 'irc.%s.%s' % (modifier_data, name)
if urlserver_settings['buffer_short_name'] == 'on':
buffer_short_name = name
else:
buffer_short_name = buffer_full_name
urls_short = urlserver_update_urllist(buffer_full_name, buffer_short_name, None, msg['nick'], message, msg['nick'])
if urls_short:
if urlserver_settings['separators'] and len(urlserver_settings['separators']) == 3:
separator = ' %s ' % (urlserver_settings['separators'][1])
urls_string = separator.join(urls_short)
urls_string = '%s %s %s' % (urlserver_settings['separators'][0], urls_string, urlserver_settings['separators'][2])
else:
urls_string = ' | '.join(urls_short)
urls_string = '[ ' + urls_string + ' ]'
if urlserver_settings['color_in_msg']:
urls_string = '\x03%s%s' % (urlserver_settings['color_in_msg'], urls_string)
string = "%s %s" % (string, urls_string)
return string
def urlserver_config_cb(data, option, value):
"""Called when a script option is changed."""
global urlserver_settings
pos = option.rfind('.')
if pos > 0:
name = option[pos+1:]
if name in urlserver_settings:
if name == 'http_allowed_ips':
urlserver_settings[name] = re.compile(value)
else:
urlserver_settings[name] = value
if name in ('http_hostname', 'http_port'):
# Don't restart if autostart is disabled and server isn't already running
if urlserver_settings['http_autostart'] == 'on' or urlserver['socket']:
urlserver_server_restart()
return weechat.WEECHAT_RC_OK
def urlserver_filename():
"""Return name of file used to store list of urls."""
return os.path.join(weechat.info_get('weechat_dir', ''), 'urlserver_list.txt')
def urlserver_read_urls():
"""Read file with URLs."""
global urlserver
filename = urlserver_filename()
if os.path.isfile(filename):
urlserver['number'] = 0
try:
urlserver['urls'] = ast.literal_eval(open(filename, 'r').read())
keys = sorted(urlserver['urls'])
if keys:
urlserver['number'] = keys[-1] + 1
else:
urlserver['number'] = 0
except:
weechat.prnt('', '%surlserver: error reading file "%s"' % (weechat.prefix('error'), filename))
def urlserver_write_urls():
"""Write file with URLs."""
global urlserver
keys = sorted(urlserver['urls'])
content = '{\n%s\n}\n' % '\n'.join([' %d: %s,' % (key, str(urlserver['urls'][key])) for key in keys])
open(urlserver_filename(), 'w').write(content)
def urlserver_end():
"""Script unloaded (oh no, why?)"""
urlserver_server_stop()
urlserver_write_urls()
return weechat.WEECHAT_RC_OK
if __name__ == '__main__' and import_ok:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, 'urlserver_end', ''):
# set default settings
version = weechat.info_get('version_number', '') or 0
for option, value in urlserver_settings_default.items():
if weechat.config_is_set_plugin(option):
urlserver_settings[option] = weechat.config_get_plugin(option)
else:
weechat.config_set_plugin(option, value[0])
urlserver_settings[option] = value[0]
if int(version) >= 0x00030500:
weechat.config_set_desc_plugin(option, '%s (default: "%s")' % (value[1], value[0]))
# detect config changes
weechat.hook_config('plugins.var.python.%s.*' % SCRIPT_NAME, 'urlserver_config_cb', '')
# add command
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC, 'start|restart|stop|status || clear',
' start: start server\n'
'restart: restart server\n'
' stop: stop server\n'
' status: display status of server\n'
' clear: remove all URLs from list\n\n'
'Without argument, this command opens new buffer with list of URLs.\n\n'
'Initial setup:\n'
' - by default, script will listen on a random free port, you can force a port with:\n'
' /set plugins.var.python.urlserver.http_port "1234"\n'
' - you can force an IP or custom hostname with:\n'
' /set plugins.var.python.urlserver.http_hostname "111.22.33.44"\n'
' - it is strongly recommended to restrict IPs allowed and/or use auth, for example:\n'
' /set plugins.var.python.urlserver.http_allowed_ips "^(123.45.67.89|192.160.*)$"\n'
' /set plugins.var.python.urlserver.http_auth "user:password"\n'
' - if you do not like the default HTML formatting, you can override the CSS:\n'
' /set plugins.var.python.urlserver.http_css_url "http://example.com/sample.css"\n'
' See https://raw.github.com/FiXato/weechat_scripts/master/urlserver/sample.css\n'
' - don\'t like the built-in HTTP server to start automatically? Disable it:\n'
' /set plugins.var.python.urlserver.http_autostart "off"\n'
' - have external port 80 or 443 (https) forwarded to your internal server port? Remove :port with:\n'
' /set plugins.var.python.urlserver.http_port_display "80" or "443" respectively\n'
'\n'
'Tip: use URL without key at the end to display list of all URLs in your browser.',
'start|restart|stop|status|clear', 'urlserver_cmd_cb', '')
if urlserver_settings['http_autostart'] == 'on':
# start mini HTTP server
urlserver_server_start()
# load urls from file
urlserver_read_urls()
# catch URLs in buffers
weechat.hook_print('', '', '://', 1, 'urlserver_print_cb', '')
# modify URLs in IRC messages (for relay)
weechat.hook_modifier('irc_in2_privmsg', 'urlserver_modifier_irc_cb', '')
weechat.hook_modifier('irc_in2_notice', 'urlserver_modifier_irc_cb', '')
# search buffer
urlserver['buffer'] = weechat.buffer_search('python', SCRIPT_BUFFER)
|
|
from sqlalchemy import Boolean, Column, Date, DateTime, ForeignKey, func, Integer, String, Text, Index
from sqlalchemy.orm import relationship
from dataactcore.models.baseModel import Base
class _FSRSAttributes:
"""Attributes shared by all FSRS models"""
id = Column(Integer, primary_key=True)
duns = Column(String, index=True)
dba_name = Column(String)
principle_place_city = Column(String)
principle_place_street = Column(String, nullable=True)
principle_place_state = Column(String)
principle_place_state_name = Column(String)
principle_place_country = Column(String)
principle_place_zip = Column(String)
principle_place_district = Column(String, nullable=True)
parent_duns = Column(String, index=True)
funding_agency_id = Column(String)
funding_agency_name = Column(String)
top_paid_fullname_1 = Column(String, nullable=True)
top_paid_amount_1 = Column(String, nullable=True)
top_paid_fullname_2 = Column(String, nullable=True)
top_paid_amount_2 = Column(String, nullable=True)
top_paid_fullname_3 = Column(String, nullable=True)
top_paid_amount_3 = Column(String, nullable=True)
top_paid_fullname_4 = Column(String, nullable=True)
top_paid_amount_4 = Column(String, nullable=True)
top_paid_fullname_5 = Column(String, nullable=True)
top_paid_amount_5 = Column(String, nullable=True)
uei_number = Column(String, index=True)
parent_uei = Column(String, index=True)
class _ContractAttributes(_FSRSAttributes):
"""Common attributes of FSRSProcurement and FSRSSubcontracts"""
company_name = Column(String)
bus_types = Column(String)
company_address_city = Column(String)
company_address_street = Column(String, nullable=True)
company_address_state = Column(String)
company_address_state_name = Column(String)
company_address_country = Column(String)
company_address_zip = Column(String)
company_address_district = Column(String, nullable=True)
parent_company_name = Column(String)
naics = Column(String)
funding_office_id = Column(String)
funding_office_name = Column(String)
recovery_model_q1 = Column(Boolean)
recovery_model_q2 = Column(Boolean)
class _GrantAttributes(_FSRSAttributes):
"""Common attributes of FSRSGrant and FSRSSubgrant"""
dunsplus4 = Column(String, nullable=True)
awardee_name = Column(String)
awardee_address_city = Column(String)
awardee_address_street = Column(String, nullable=True)
awardee_address_state = Column(String)
awardee_address_state_name = Column(String)
awardee_address_country = Column(String)
awardee_address_zip = Column(String)
awardee_address_district = Column(String, nullable=True)
cfda_numbers = Column(String)
project_description = Column(String)
compensation_q1 = Column(Boolean)
compensation_q2 = Column(Boolean)
federal_agency_id = Column(String)
federal_agency_name = Column(String)
class _PrimeAwardAttributes:
"""Attributes shared by FSRSProcurements and FSRSGrants"""
internal_id = Column(String)
date_submitted = Column(DateTime)
report_period_mon = Column(String)
report_period_year = Column(String)
@classmethod
def next_id(cls, sess):
# We'll often want to load "new" data -- anything with a later id than the awards we have. Return one more than that max id.
current = sess.query(func.max(cls.id)).one()[0] or -1
return current + 1
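# Hypothetical usage sketch (the session `sess` and the query are assumed,
# not part of this module): fetch only records newer than those already
# loaded, e.g.
#     start = FSRSProcurement.next_id(sess)
#     new_rows = sess.query(FSRSProcurement).filter(
#         FSRSProcurement.id >= start).all()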
class FSRSProcurement(Base, _ContractAttributes, _PrimeAwardAttributes):
__tablename__ = "fsrs_procurement"
contract_number = Column(String, index=True)
idv_reference_number = Column(String, nullable=True)
report_type = Column(String)
contract_agency_code = Column(String)
contract_idv_agency_code = Column(String, nullable=True)
contracting_office_aid = Column(String)
contracting_office_aname = Column(String)
contracting_office_id = Column(String)
contracting_office_name = Column(String)
treasury_symbol = Column(String)
dollar_obligated = Column(String)
date_signed = Column(Date)
transaction_type = Column(String)
program_title = Column(String)
class FSRSSubcontract(Base, _ContractAttributes):
__tablename__ = "fsrs_subcontract"
parent_id = Column(Integer, ForeignKey('fsrs_procurement.id', ondelete='CASCADE'), index=True)
parent = relationship(FSRSProcurement, back_populates='subawards')
subcontract_amount = Column(String)
subcontract_date = Column(Date)
subcontract_num = Column(String)
overall_description = Column(Text)
recovery_subcontract_amt = Column(String, nullable=True)
FSRSProcurement.subawards = relationship(FSRSSubcontract, back_populates='parent')
class FSRSGrant(Base, _GrantAttributes, _PrimeAwardAttributes):
__tablename__ = "fsrs_grant"
fain = Column(String, index=True)
total_fed_funding_amount = Column(String)
obligation_date = Column(Date)
class FSRSSubgrant(Base, _GrantAttributes):
__tablename__ = "fsrs_subgrant"
parent_id = Column(Integer, ForeignKey('fsrs_grant.id', ondelete='CASCADE'), index=True)
parent = relationship(FSRSGrant, back_populates='subawards')
subaward_amount = Column(String)
subaward_date = Column(Date)
subaward_num = Column(String)
FSRSGrant.subawards = relationship(FSRSSubgrant, back_populates='parent')
Index("ix_fsrs_proc_contract_number_upper", func.upper(FSRSProcurement.contract_number))
Index("ix_fsrs_proc_idv_ref_upper", func.upper(FSRSProcurement.idv_reference_number))
Index("ix_fsrs_proc_contract_office_aid_upper", func.upper(FSRSProcurement.contracting_office_aid))
Index("ix_fsrs_grant_fain_upper", func.upper(FSRSGrant.fain))
Index("ix_fsrs_grant_federal_agency_id_upper", func.upper(FSRSGrant.federal_agency_id))
class Subaward(Base):
""" Model for all subaward data """
__tablename__ = "subaward"
id = Column(Integer, primary_key=True)
# File F - Prime Award Data
unique_award_key = Column(Text, index=True)
award_id = Column(Text, index=True)
parent_award_id = Column(Text, index=True)
award_amount = Column(Text)
action_date = Column(Text, index=True)
fy = Column(Text)
awarding_agency_code = Column(Text, index=True)
awarding_agency_name = Column(Text)
awarding_sub_tier_agency_c = Column(Text, index=True)
awarding_sub_tier_agency_n = Column(Text)
awarding_office_code = Column(Text)
awarding_office_name = Column(Text)
funding_agency_code = Column(Text, index=True)
funding_agency_name = Column(Text)
funding_sub_tier_agency_co = Column(Text, index=True)
funding_sub_tier_agency_na = Column(Text)
funding_office_code = Column(Text)
funding_office_name = Column(Text)
awardee_or_recipient_uniqu = Column(Text, index=True)
awardee_or_recipient_uei = Column(Text, index=True)
awardee_or_recipient_legal = Column(Text)
dba_name = Column(Text)
ultimate_parent_unique_ide = Column(Text)
ultimate_parent_uei = Column(Text)
ultimate_parent_legal_enti = Column(Text)
legal_entity_country_code = Column(Text)
legal_entity_country_name = Column(Text)
legal_entity_address_line1 = Column(Text)
legal_entity_city_name = Column(Text)
legal_entity_state_code = Column(Text)
legal_entity_state_name = Column(Text)
legal_entity_zip = Column(Text)
legal_entity_congressional = Column(Text)
legal_entity_foreign_posta = Column(Text)
business_types = Column(Text)
place_of_perform_city_name = Column(Text)
place_of_perform_state_code = Column(Text)
place_of_perform_state_name = Column(Text)
place_of_performance_zip = Column(Text)
place_of_perform_congressio = Column(Text)
place_of_perform_country_co = Column(Text)
place_of_perform_country_na = Column(Text)
award_description = Column(Text)
naics = Column(Text)
naics_description = Column(Text)
cfda_numbers = Column(Text)
cfda_titles = Column(Text)
# File F - Subaward Data
subaward_type = Column(Text, index=True)
subaward_report_year = Column(Text)
subaward_report_month = Column(Text)
subaward_number = Column(Text, index=True)
subaward_amount = Column(Text)
sub_action_date = Column(Text, index=True)
sub_awardee_or_recipient_uniqu = Column(Text, index=True)
sub_awardee_or_recipient_uei = Column(Text, index=True)
sub_awardee_or_recipient_legal = Column(Text)
sub_dba_name = Column(Text)
sub_ultimate_parent_unique_ide = Column(Text)
sub_ultimate_parent_uei = Column(Text)
sub_ultimate_parent_legal_enti = Column(Text)
sub_legal_entity_country_code = Column(Text)
sub_legal_entity_country_name = Column(Text)
sub_legal_entity_address_line1 = Column(Text)
sub_legal_entity_city_name = Column(Text)
sub_legal_entity_state_code = Column(Text)
sub_legal_entity_state_name = Column(Text)
sub_legal_entity_zip = Column(Text)
sub_legal_entity_congressional = Column(Text)
sub_legal_entity_foreign_posta = Column(Text)
sub_business_types = Column(Text)
sub_place_of_perform_city_name = Column(Text)
sub_place_of_perform_state_code = Column(Text)
sub_place_of_perform_state_name = Column(Text)
sub_place_of_performance_zip = Column(Text)
sub_place_of_perform_congressio = Column(Text)
sub_place_of_perform_country_co = Column(Text)
sub_place_of_perform_country_na = Column(Text)
subaward_description = Column(Text)
sub_high_comp_officer1_full_na = Column(Text, nullable=True)
sub_high_comp_officer1_amount = Column(Text, nullable=True)
sub_high_comp_officer2_full_na = Column(Text, nullable=True)
sub_high_comp_officer2_amount = Column(Text, nullable=True)
sub_high_comp_officer3_full_na = Column(Text, nullable=True)
sub_high_comp_officer3_amount = Column(Text, nullable=True)
sub_high_comp_officer4_full_na = Column(Text, nullable=True)
sub_high_comp_officer4_amount = Column(Text, nullable=True)
sub_high_comp_officer5_full_na = Column(Text, nullable=True)
sub_high_comp_officer5_amount = Column(Text, nullable=True)
# Additional FSRS - Prime Award Data
prime_id = Column(Integer, index=True)
internal_id = Column(Text, index=True)
date_submitted = Column(Text)
report_type = Column(Text)
transaction_type = Column(Text)
program_title = Column(Text)
contract_agency_code = Column(Text)
contract_idv_agency_code = Column(Text)
grant_funding_agency_id = Column(Text)
grant_funding_agency_name = Column(Text)
federal_agency_name = Column(Text)
treasury_symbol = Column(Text)
dunsplus4 = Column(Text)
recovery_model_q1 = Column(Text)
recovery_model_q2 = Column(Text)
compensation_q1 = Column(Text)
compensation_q2 = Column(Text)
high_comp_officer1_full_na = Column(Text, nullable=True)
high_comp_officer1_amount = Column(Text, nullable=True)
high_comp_officer2_full_na = Column(Text, nullable=True)
high_comp_officer2_amount = Column(Text, nullable=True)
high_comp_officer3_full_na = Column(Text, nullable=True)
high_comp_officer3_amount = Column(Text, nullable=True)
high_comp_officer4_full_na = Column(Text, nullable=True)
high_comp_officer4_amount = Column(Text, nullable=True)
high_comp_officer5_full_na = Column(Text, nullable=True)
high_comp_officer5_amount = Column(Text, nullable=True)
place_of_perform_street = Column(Text)
# Additional FSRS - Subaward Data
sub_id = Column(Integer, index=True)
sub_parent_id = Column(Integer, index=True)
sub_federal_agency_id = Column(Text)
sub_federal_agency_name = Column(Text)
sub_funding_agency_id = Column(Text)
sub_funding_agency_name = Column(Text)
sub_funding_office_id = Column(Text)
sub_funding_office_name = Column(Text)
sub_naics = Column(Text)
sub_cfda_numbers = Column(Text)
sub_dunsplus4 = Column(Text)
sub_recovery_subcontract_amt = Column(Text)
sub_recovery_model_q1 = Column(Text)
sub_recovery_model_q2 = Column(Text)
sub_compensation_q1 = Column(Text)
sub_compensation_q2 = Column(Text)
sub_place_of_perform_street = Column(Text)
Index("ix_subaward_award_id_upper", func.upper(Subaward.award_id))
Index("ix_subaward_parent_award_id_upper", func.upper(Subaward.parent_award_id))
Index("ix_subaward_awarding_sub_tier_agency_c_upper", func.upper(Subaward.awarding_sub_tier_agency_c))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Zadara Storage, Inc.
# Copyright (c) 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Zadara VPSA volume driver
"""
import copy
import httplib
from cinder import exception
from cinder import test
from cinder.openstack.common import log as logging
from cinder.volume import zadara
from lxml import etree
LOG = logging.getLogger("cinder.volume.driver")
DEFAULT_RUNTIME_VARS = {
'status': 200,
'user': 'test',
'password': 'test_password',
'access_key': '0123456789ABCDEF',
'volumes': [],
'servers': [],
'controllers': [('active_ctrl', {'display_name': 'test_ctrl'})],
'counter': 1000,
'login': """
<hash>
<user>
<updated-at type="datetime">2012-04-30...</updated-at>
<access-key>%s</access-key>
<id type="integer">1</id>
<created-at type="datetime">2012-02-21...</created-at>
<email>jsmith@example.com</email>
<username>jsmith</username>
</user>
<status type="integer">0</status>
</hash>""",
'good': """
<hash>
<status type="integer">0</status>
</hash>""",
'bad_login': """
<hash>
<status type="integer">5</status>
<status-msg>Some message...</status-msg>
</hash>""",
'bad_volume': """
<hash>
<status type="integer">10081</status>
<status-msg>Virtual volume xxx not found</status-msg>
</hash>""",
'bad_server': """
<hash>
<status type="integer">10086</status>
<status-msg>Server xxx not found</status-msg>
</hash>""",
'server_created': """
<create-server-response>
<server-name>%s</server-name>
<status type='integer'>0</status>
</create-server-response>""",
}
RUNTIME_VARS = None
class FakeRequest(object):
def __init__(self, method, url, body):
self.method = method
self.url = url
self.body = body
self.status = RUNTIME_VARS['status']
def read(self):
ops = {'POST': [('/api/users/login.xml', self._login),
('/api/volumes.xml', self._create_volume),
('/api/servers.xml', self._create_server),
('/api/servers/*/volumes.xml', self._attach),
('/api/volumes/*/detach.xml', self._detach)],
'DELETE': [('/api/volumes/*', self._delete)],
'GET': [('/api/volumes.xml', self._list_volumes),
('/api/vcontrollers.xml', self._list_controllers),
('/api/servers.xml', self._list_servers),
('/api/volumes/*/servers.xml',
self._list_vol_attachments)]
}
ops_list = ops[self.method]
modified_url = self.url.split('?')[0]
for (templ_url, func) in ops_list:
if self._compare_url(modified_url, templ_url):
result = func()
return result
def _compare_url(self, url, template_url):
items = url.split('/')
titems = template_url.split('/')
for (i, titem) in enumerate(titems):
if titem != '*' and titem != items[i]:
return False
return True
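# Example: '*' matches any single path segment, so
#     _compare_url('/api/volumes/volume-0001000/detach.xml',
#                  '/api/volumes/*/detach.xml')    -> True
#     _compare_url('/api/volumes.xml', '/api/servers.xml')    -> False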
def _get_parameters(self, data):
items = data.split('&')
params = {}
for item in items:
if item:
(k, v) = item.split('=')
params[k] = v
return params
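# e.g. a body of 'user=test&password=secret' parses to
# {'user': 'test', 'password': 'secret'}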
def _get_counter(self):
cnt = RUNTIME_VARS['counter']
RUNTIME_VARS['counter'] += 1
return cnt
def _login(self):
params = self._get_parameters(self.body)
if params['user'] == RUNTIME_VARS['user'] and\
params['password'] == RUNTIME_VARS['password']:
return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key']
else:
return RUNTIME_VARS['bad_login']
def _incorrect_access_key(self, params):
if params['access_key'] != RUNTIME_VARS['access_key']:
return True
else:
return False
def _create_volume(self):
params = self._get_parameters(self.body)
if self._incorrect_access_key(params):
return RUNTIME_VARS['bad_login']
params['attachments'] = []
vpsa_vol = 'volume-%07d' % self._get_counter()
RUNTIME_VARS['volumes'].append((vpsa_vol, params))
return RUNTIME_VARS['good']
def _create_server(self):
params = self._get_parameters(self.body)
if self._incorrect_access_key(params):
return RUNTIME_VARS['bad_login']
vpsa_srv = 'srv-%07d' % self._get_counter()
RUNTIME_VARS['servers'].append((vpsa_srv, params))
return RUNTIME_VARS['server_created'] % vpsa_srv
def _attach(self):
params = self._get_parameters(self.body)
if self._incorrect_access_key(params):
return RUNTIME_VARS['bad_login']
srv = self.url.split('/')[3]
vol = params['volume_name[]']
for (vol_name, params) in RUNTIME_VARS['volumes']:
if vol_name == vol:
attachments = params['attachments']
if srv in attachments:
# already attached - ok
return RUNTIME_VARS['good']
else:
attachments.append(srv)
return RUNTIME_VARS['good']
return RUNTIME_VARS['bad_volume']
def _detach(self):
params = self._get_parameters(self.body)
if self._incorrect_access_key(params):
return RUNTIME_VARS['bad_login']
vol = self.url.split('/')[3]
srv = params['server_name[]']
for (vol_name, params) in RUNTIME_VARS['volumes']:
if vol_name == vol:
attachments = params['attachments']
if srv not in attachments:
return RUNTIME_VARS['bad_server']
else:
attachments.remove(srv)
return RUNTIME_VARS['good']
return RUNTIME_VARS['bad_volume']
def _delete(self):
vol = self.url.split('/')[3].split('.')[0]
for (vol_name, params) in RUNTIME_VARS['volumes']:
if vol_name == vol:
if params['attachments']:
# there are attachments - should be volume busy error
return RUNTIME_VARS['bad_volume']
else:
RUNTIME_VARS['volumes'].remove((vol_name, params))
return RUNTIME_VARS['good']
return RUNTIME_VARS['bad_volume']
def _generate_list_resp(self, header, footer, body, lst):
resp = header
for (obj, params) in lst:
resp += body % (obj, params['display_name'])
resp += footer
return resp
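# Sketch: each (name, params) pair is rendered through the `body` template,
# so a list such as [('volume-0001000', {'display_name': 'vol1'})] yields
# header + one formatted element + footer.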
def _list_volumes(self):
header = """<show-volumes-response>
<status type='integer'>0</status>
<volumes type='array'>"""
footer = "</volumes></show-volumes-response>"
body = """<volume>
<name>%s</name>
<display-name>%s</display-name>
<status>Available</status>
<virtual-capacity type='integer'>1</virtual-capacity>
<allocated-capacity type='integer'>1</allocated-capacity>
<raid-group-name>r5</raid-group-name>
<cache>write-through</cache>
<created-at type='datetime'>2012-01-28...</created-at>
<modified-at type='datetime'>2012-01-28...</modified-at>
</volume>"""
return self._generate_list_resp(header, footer, body,
RUNTIME_VARS['volumes'])
def _list_controllers(self):
header = """<show-vcontrollers-response>
<status type='integer'>0</status>
<vcontrollers type='array'>"""
footer = "</vcontrollers></show-vcontrollers-response>"
body = """<vcontroller>
<name>%s</name>
<display-name>%s</display-name>
<state>active</state>
<target>iqn.2011-04.com.zadarastorage:vsa-xxx:1</target>
<iscsi-ip>1.1.1.1</iscsi-ip>
<mgmt-ip>1.1.1.1</mgmt-ip>
<software-ver>0.0.09-05.1--77.7</software-ver>
<heartbeat1>ok</heartbeat1>
<heartbeat2>ok</heartbeat2>
<chap-username>test_chap_user</chap-username>
<chap-target-secret>test_chap_secret</chap-target-secret>
</vcontroller>"""
return self._generate_list_resp(header, footer, body,
RUNTIME_VARS['controllers'])
def _list_servers(self):
header = """<show-servers-response>
<status type='integer'>0</status>
<servers type='array'>"""
footer = "</servers></show-servers-response>"
body = """<server>
<name>%s</name>
<display-name>%s</display-name>
<iqn>%s</iqn>
<status>Active</status>
<created-at type='datetime'>2012-01-28...</created-at>
<modified-at type='datetime'>2012-01-28...</modified-at>
</server>"""
resp = header
for (obj, params) in RUNTIME_VARS['servers']:
resp += body % (obj, params['display_name'], params['iqn'])
resp += footer
return resp
def _get_server_obj(self, name):
for (srv_name, params) in RUNTIME_VARS['servers']:
if srv_name == name:
return params
def _list_vol_attachments(self):
vol = self.url.split('/')[3]
header = """<show-servers-response>
<status type="integer">0</status>
<servers type="array">"""
footer = "</servers></show-servers-response>"
body = """<server>
<name>%s</name>
<display-name>%s</display-name>
<iqn>%s</iqn>
<target>iqn.2011-04.com.zadarastorage:vsa-xxx:1</target>
<lun>0</lun>
</server>"""
for (vol_name, params) in RUNTIME_VARS['volumes']:
if vol_name == vol:
attachments = params['attachments']
resp = header
for server in attachments:
srv_params = self._get_server_obj(server)
resp += body % (server,
srv_params['display_name'], srv_params['iqn'])
resp += footer
return resp
return RUNTIME_VARS['bad_volume']
class FakeHTTPConnection(object):
"""A fake httplib.HTTPConnection for zadara volume driver tests."""
def __init__(self, host, port, use_ssl=False):
LOG.debug('Enter: __init__ FakeHTTPConnection')
self.host = host
self.port = port
self.use_ssl = use_ssl
self.req = None
def request(self, method, url, body):
LOG.debug('Enter: request')
self.req = FakeRequest(method, url, body)
def getresponse(self):
LOG.debug('Enter: getresponse')
return self.req
def close(self):
LOG.debug('Enter: close')
self.req = None
class FakeHTTPSConnection(FakeHTTPConnection):
def __init__(self, host, port):
LOG.debug('Enter: __init__ FakeHTTPSConnection')
super(FakeHTTPSConnection, self).__init__(host, port, use_ssl=True)
class ZadaraVPSADriverTestCase(test.TestCase):
"""Test case for Zadara VPSA volume driver"""
def setUp(self):
LOG.debug('Enter: setUp')
super(ZadaraVPSADriverTestCase, self).setUp()
self.flags(
zadara_user='test',
zadara_password='test_password',
)
global RUNTIME_VARS
RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS)
self.driver = zadara.ZadaraVPSAISCSIDriver()
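# Route all of the driver's HTTP(S) traffic through the fakes above so no
# real network connections are made during the tests.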
self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection)
self.stubs.Set(httplib, 'HTTPSConnection', FakeHTTPSConnection)
self.driver.do_setup(None)
def tearDown(self):
super(ZadaraVPSADriverTestCase, self).tearDown()
def test_create_destroy(self):
"""Create/Delete volume."""
volume = {'name': 'test_volume_01', 'size': 1}
self.driver.create_volume(volume)
self.driver.delete_volume(volume)
def test_create_destroy_multiple(self):
"""Create/Delete multiple volumes."""
self.flags(zadara_vpsa_allow_nonexistent_delete=False)
self.driver.create_volume({'name': 'test_volume_01', 'size': 1})
self.driver.create_volume({'name': 'test_volume_02', 'size': 2})
self.driver.create_volume({'name': 'test_volume_03', 'size': 3})
self.driver.delete_volume({'name': 'test_volume_02'})
self.driver.delete_volume({'name': 'test_volume_03'})
self.driver.delete_volume({'name': 'test_volume_01'})
self.assertRaises(exception.VolumeNotFound,
self.driver.delete_volume,
{'name': 'test_volume_04'})
self.flags(zadara_vpsa_allow_nonexistent_delete=True)
self.driver.delete_volume({'name': 'test_volume_04'})
def test_destroy_non_existent(self):
"""Delete non-existent volume."""
self.flags(zadara_vpsa_allow_nonexistent_delete=False)
volume = {'name': 'test_volume_02', 'size': 1}
self.assertRaises(exception.VolumeNotFound,
self.driver.delete_volume,
volume)
self.flags(zadara_vpsa_allow_nonexistent_delete=True)
def test_empty_apis(self):
"""Test empty func (for coverage only)."""
context = None
volume = {'name': 'test_volume_01', 'size': 1}
self.driver.create_export(context, volume)
self.driver.ensure_export(context, volume)
self.driver.remove_export(context, volume)
self.assertRaises(NotImplementedError,
self.driver.create_volume_from_snapshot,
volume, None)
self.assertRaises(NotImplementedError,
self.driver.create_snapshot,
None)
self.assertRaises(NotImplementedError,
self.driver.delete_snapshot,
None)
self.assertRaises(NotImplementedError,
self.driver.local_path,
None)
self.driver.check_for_setup_error()
def test_volume_attach_detach(self):
"""Test volume attachment and detach"""
volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
connector = dict(initiator='test_iqn.1')
self.driver.create_volume(volume)
props = self.driver.initialize_connection(volume, connector)
self.assertEqual(props['driver_volume_type'], 'iscsi')
data = props['data']
self.assertEqual(data['target_portal'], '1.1.1.1:3260')
self.assertEqual(data['target_iqn'],
'iqn.2011-04.com.zadarastorage:vsa-xxx:1')
self.assertEqual(data['target_lun'], '0')
self.assertEqual(data['volume_id'], 123)
self.assertEqual(data['auth_method'], 'CHAP')
self.assertEqual(data['auth_username'], 'test_chap_user')
self.assertEqual(data['auth_password'], 'test_chap_secret')
self.driver.terminate_connection(volume, connector)
self.driver.delete_volume(volume)
def test_volume_attach_multiple_detach(self):
"""Test multiple volume attachment and detach"""
volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
connector1 = dict(initiator='test_iqn.1')
connector2 = dict(initiator='test_iqn.2')
connector3 = dict(initiator='test_iqn.3')
self.driver.create_volume(volume)
props1 = self.driver.initialize_connection(volume, connector1)
props2 = self.driver.initialize_connection(volume, connector2)
props3 = self.driver.initialize_connection(volume, connector3)
self.driver.terminate_connection(volume, connector1)
self.driver.terminate_connection(volume, connector3)
self.driver.terminate_connection(volume, connector2)
self.driver.delete_volume(volume)
def test_wrong_attach_params(self):
"""Test different wrong attach scenarios"""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103}
connector1 = dict(initiator='test_iqn.1')
connector2 = dict(initiator='test_iqn.2')
connector3 = dict(initiator='test_iqn.3')
self.assertRaises(exception.VolumeNotFound,
self.driver.initialize_connection,
volume1, connector1)
def test_wrong_detach_params(self):
"""Test different wrong detachment scenarios"""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103}
connector1 = dict(initiator='test_iqn.1')
connector2 = dict(initiator='test_iqn.2')
connector3 = dict(initiator='test_iqn.3')
self.driver.create_volume(volume1)
self.driver.create_volume(volume2)
props1 = self.driver.initialize_connection(volume1, connector1)
props2 = self.driver.initialize_connection(volume2, connector2)
self.assertRaises(exception.ZadaraServerNotFound,
self.driver.terminate_connection,
volume1, connector3)
self.assertRaises(exception.VolumeNotFound,
self.driver.terminate_connection,
volume3, connector1)
self.assertRaises(exception.FailedCmdWithDump,
self.driver.terminate_connection,
volume1, connector2)
def test_wrong_login_reply(self):
"""Test wrong login reply"""
RUNTIME_VARS['login'] = """<hash>
<access-key>%s</access-key>
<status type="integer">0</status>
</hash>"""
self.assertRaises(exception.MalformedResponse,
self.driver.do_setup, None)
RUNTIME_VARS['login'] = """
<hash>
<user>
<updated-at type="datetime">2012-04-30...</updated-at>
<id type="integer">1</id>
<created-at type="datetime">2012-02-21...</created-at>
<email>jsmith@example.com</email>
<username>jsmith</username>
</user>
<access-key>%s</access-key>
<status type="integer">0</status>
</hash>"""
self.assertRaises(exception.MalformedResponse,
self.driver.do_setup, None)
def test_ssl_use(self):
"""Coverage test for SSL connection"""
self.flags(zadara_vpsa_use_ssl=True)
self.driver.do_setup(None)
self.flags(zadara_vpsa_use_ssl=False)
def test_bad_http_response(self):
"""Coverage test for non-good HTTP response"""
RUNTIME_VARS['status'] = 400
volume = {'name': 'test_volume_01', 'size': 1}
self.assertRaises(exception.BadHTTPResponseStatus,
self.driver.create_volume, volume)
def test_delete_without_detach(self):
"""Test volume deletion without detach"""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
connector1 = dict(initiator='test_iqn.1')
connector2 = dict(initiator='test_iqn.2')
connector3 = dict(initiator='test_iqn.3')
self.driver.create_volume(volume1)
props1 = self.driver.initialize_connection(volume1, connector1)
props2 = self.driver.initialize_connection(volume1, connector2)
props3 = self.driver.initialize_connection(volume1, connector3)
self.flags(zadara_vpsa_auto_detach_on_delete=False)
self.assertRaises(exception.VolumeAttached,
self.driver.delete_volume, volume1)
self.flags(zadara_vpsa_auto_detach_on_delete=True)
self.driver.delete_volume(volume1)
def test_no_active_ctrl(self):
RUNTIME_VARS['controllers'] = []
volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
connector = dict(initiator='test_iqn.1')
self.driver.create_volume(volume)
self.assertRaises(exception.ZadaraVPSANoActiveController,
self.driver.initialize_connection,
volume, connector)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cql.cqltypes import cql_types
class CQLHelpTopics(object):
def get_help_topics(self):
return [t[5:] for t in dir(self) if t.startswith('help_')]
def print_help_topic(self, topic):
getattr(self, 'help_' + topic.lower())()
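# Dispatch is by reflection: a method named help_<topic> serves <topic>.
# For example, get_help_topics() turns 'help_types' into the topic 'types',
# and print_help_topic('TYPES') calls help_types().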
def help_types(self):
print "\n CQL types recognized by this version of cqlsh:\n"
for t in cql_types:
print ' ' + t
print """
For information on the various recognizable input formats for these
types, or on controlling the formatting of cqlsh query output, see
one of the following topics:
HELP TIMESTAMP_INPUT
HELP BLOB_INPUT
HELP UUID_INPUT
HELP BOOLEAN_INPUT
HELP TEXT_OUTPUT
HELP TIMESTAMP_OUTPUT
"""
def help_timestamp_input(self):
print """
Timestamp input
CQL supports any of the following ISO 8601 formats for timestamp
specification:
yyyy-mm-dd HH:mm
yyyy-mm-dd HH:mm:ss
yyyy-mm-dd HH:mmZ
yyyy-mm-dd HH:mm:ssZ
yyyy-mm-dd'T'HH:mm
yyyy-mm-dd'T'HH:mmZ
yyyy-mm-dd'T'HH:mm:ss
yyyy-mm-dd'T'HH:mm:ssZ
yyyy-mm-dd
yyyy-mm-ddZ
The Z in these formats refers to an RFC-822 4-digit time zone,
expressing the time zone's difference from UTC. For example, a
timestamp in Pacific Standard Time might be given thus:
2012-01-20 16:14:12-0800
If no time zone is supplied, the current time zone for the Cassandra
server node will be used.
"""
def help_blob_input(self):
print """
Blob input
CQL blob data must be specified in a string literal as hexadecimal
data. Example: to store the ASCII values for the characters in the
string "CQL", use '43514c'.
"""
def help_uuid_input(self):
print """
UUID input
UUIDs may be specified in CQL using 32 hexadecimal characters,
split up using dashes in the standard UUID format:
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
"""
def help_boolean_input(self):
print """
Boolean input
CQL accepts the strings 'true' and 'false' (case insensitive)
as input for boolean types.
"""
def help_timestamp_output(self):
print """
Timestamp output
Cqlsh will display timestamps in the following format by default:
yyyy-mm-dd HH:mm:ssZ
which is a format acceptable as CQL timestamp input as well.
The output format can be changed by setting 'time_format' property
in the [ui] section of .cqlshrc file.
"""
def help_text_output(self):
print """
Textual output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
help_ascii_output = help_text_output
def help_create_index(self):
print """
CREATE INDEX [<indexname>] ON <cfname> ( <colname> );
A CREATE INDEX statement is used to create a new, automatic secondary
index on the given CQL table, for the named column. A name for the
index itself can be specified before the ON keyword, if desired. A
single column name must be specified inside the parentheses. It is not
necessary for the column to exist on any current rows (Cassandra is
schema-optional), but the column must already have a type (specified
during the CREATE TABLE, or added afterwards with ALTER TABLE).
"""
def help_drop(self):
print """
There are different variants of DROP. For more information, see
one of the following:
HELP DROP_KEYSPACE;
HELP DROP_TABLE;
HELP DROP_INDEX;
"""
def help_drop_keyspace(self):
print """
DROP KEYSPACE <keyspacename>;
A DROP KEYSPACE statement results in the immediate, irreversible
removal of a keyspace, including all column families in it, and all
data contained in those column families.
"""
def help_drop_table(self):
print """
DROP TABLE <tablename>;
A DROP TABLE statement results in the immediate, irreversible
removal of a CQL table and the underlying column family, including all
data contained in it.
"""
help_drop_columnfamily = help_drop_table
def help_drop_index(self):
print """
DROP INDEX <indexname>;
A DROP INDEX statement is used to drop an existing secondary index.
"""
def help_truncate(self):
print """
TRUNCATE <tablename>;
TRUNCATE accepts a single argument for the table name, and permanently
removes all data from it.
"""
def help_create(self):
print """
There are different variants of CREATE. For more information, see
one of the following:
HELP CREATE_KEYSPACE;
HELP CREATE_TABLE;
HELP CREATE_INDEX;
"""
def help_use(self):
print """
USE <keyspacename>;
Tells cqlsh and the connected Cassandra instance that you will be
working in the given keyspace. All subsequent operations on tables
or indexes will be in the context of this keyspace, unless otherwise
specified, until another USE command is issued or the connection
terminates.
As always, when a keyspace name does not work as a normal identifier or
number, it can be quoted using single quotes (CQL 2) or double quotes
(CQL 3).
"""
def help_create_table(self):
print """
CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [,
<colname> <type> [, ...]] )
[WITH <optionname> = <val> [AND <optionname> = <val> [...]]];
CREATE TABLE statements create a new CQL table under the current
keyspace. Valid table names are strings of alphanumeric characters and
underscores, which begin with a letter.
Each table requires a primary key, which will correspond to the
underlying columnfamily key and key validator. It's important to
note that the key type you use must be compatible with the partitioner
in use. For example, OrderPreservingPartitioner and
CollatingOrderPreservingPartitioner both require UTF-8 keys.
In cql3 mode, a table can have multiple columns composing the primary
key (see HELP COMPOSITE_PRIMARY_KEYS).
For more information, see one of the following:
HELP CREATE_TABLE_TYPES;
HELP CREATE_TABLE_OPTIONS;
"""
help_create_columnfamily = help_create_table
def help_create_table_types(self):
print """
CREATE TABLE: Specifying column types
CREATE ... (KEY <type> PRIMARY KEY,
othercol <type>) ...
It is possible to assign columns a type during table creation. Columns
configured with a type are validated accordingly when a write occurs,
and intelligent CQL drivers and interfaces will be able to decode the
column values correctly when receiving them. Column types are specified
as a parenthesized, comma-separated list of column term and type pairs.
See HELP TYPES; for the list of recognized types.
"""
help_create_columnfamily_types = help_create_table_types
def help_create_table_options(self):
print """
CREATE TABLE: Specifying columnfamily options
CREATE TABLE blah (...)
WITH optionname = val AND otheroption = val2;
A number of optional keyword arguments can be supplied to control the
configuration of a new CQL table, such as the size of the associated
row and key caches for the underlying Cassandra columnfamily. Consult
your CQL reference for the complete list of options and possible
values.
"""
help_create_columnfamily_options = help_create_table_options
def help_alter(self):
print """
ALTER TABLE <tablename> ALTER <columnname> TYPE <type>;
ALTER TABLE <tablename> ADD <columnname> <type>;
ALTER TABLE <tablename> DROP <columnname>;
ALTER TABLE <tablename> WITH <optionname> = <val> [AND <optionname> = <val> [...]];
An ALTER statement is used to manipulate table metadata. It allows you
to add new typed columns, drop existing columns, change the data
storage type of existing columns, or change table properties.
No results are returned.
See one of the following for more information:
HELP ALTER_ALTER;
HELP ALTER_ADD;
HELP ALTER_DROP;
HELP ALTER_WITH;
"""
def help_alter_alter(self):
print """
ALTER TABLE: altering existing typed columns
ALTER TABLE addamsFamily ALTER lastKnownLocation TYPE uuid;
ALTER TABLE ... ALTER changes the expected storage type for a column.
The column must already have a type in the column family metadata. The
column may or may not already exist in current rows-- but be aware that
no validation of existing data is done. The bytes stored in values for
that column will remain unchanged, and if existing data is not
deserializable according to the new type, this may cause your CQL
driver or interface to report errors.
"""
def help_alter_add(self):
print """
ALTER TABLE: adding a typed column
ALTER TABLE addamsFamily ADD gravesite varchar;
The ALTER TABLE ... ADD variant adds a typed column to a column
family. The column must not already have a type in the column family
metadata. See the warnings on HELP ALTER_ALTER regarding the lack of
validation of existing data; they apply here as well.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Note that this does _not_ remove the
column from current rows; it just removes the metadata saying that the
bytes stored under that column are expected to be deserializable
according to a certain type.
"""
def help_alter_with(self):
print """
ALTER TABLE: changing column family properties
ALTER TABLE addamsFamily WITH comment = 'Glad to be here!'
AND read_repair_chance = 0.2;
An ALTER TABLE ... WITH statement makes adjustments to the
table properties, as defined when the table was created (see
HELP CREATE_TABLE_OPTIONS and your Cassandra documentation for
information about the supported parameter names and values).
"""
def help_delete_columns(self):
print """
DELETE: specifying columns
DELETE col1, col2, col3 FROM ...
Following the DELETE keyword is an optional comma-delimited list of
column name terms. When no column names are given, the remove applies
to the entire row(s) matched by the WHERE clause.
When column names do not parse as valid CQL identifiers, they can be
quoted in single quotes (CQL 2) or double quotes (CQL 3).
"""
def help_delete_where(self):
print """
DELETE: specifying rows
DELETE ... WHERE keycol = 'some_key_value';
DELETE ... WHERE keycol1 = 'val1' AND keycol2 = 'val2';
DELETE ... WHERE keycol IN (key1, key2);
The WHERE clause is used to determine to which row(s) a DELETE
applies. The first form allows the specification of a precise row
by specifying a particular primary key value (if the primary key has
multiple columns, values for each must be given). The second form
allows a list of key values to be specified using the IN operator
and a parenthesized list of comma-delimited key values.
"""
def help_update_set(self):
print """
UPDATE: Specifying Columns and Row
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> = keyname;
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> IN ('<key1>', '<key2>', ...)
Rows are created or updated by supplying column names and values in
term assignment format. Multiple columns can be set by separating the
name/value pairs using commas.
"""
def help_update_counters(self):
print """
UPDATE: Updating Counter Columns
UPDATE ... SET name1 = name1 + <value> ...
UPDATE ... SET name1 = name1 - <value> ...
Counter columns can be incremented or decremented by an arbitrary
numeric value though the assignment of an expression that adds or
subtracts the value.
"""
def help_update_where(self):
print """
UPDATE: Selecting rows to update
UPDATE ... WHERE <keyname> = <keyval>;
UPDATE ... WHERE <keyname> IN (<keyval1>, <keyval2>, ...);
UPDATE ... WHERE <keycol1> = <keyval1> AND <keycol2> = <keyval2>;
Each update statement requires a precise set of keys to be specified
using a WHERE clause.
If the table's primary key consists of multiple columns, an explicit
value must be given for each for the UPDATE statement to make sense.
"""
def help_select_table(self):
print """
SELECT: Specifying Table
SELECT ... FROM [<keyspace>.]<tablename> ...
The FROM clause is used to specify the CQL table applicable to a SELECT
query. The keyspace in which the table exists can optionally be
specified along with the table name, separated by a dot (.). This will
not change the current keyspace of the session (see HELP USE).
"""
help_select_columnfamily = help_select_table
def help_select_where(self):
print """
SELECT: Filtering rows
SELECT ... WHERE <key> = keyname AND name1 = value1
SELECT ... WHERE <key> >= startkey AND <key> <= endkey AND name1 = value1
SELECT ... WHERE <key> IN ('<key>', '<key>', '<key>', ...)
The WHERE clause provides for filtering the rows that appear in
results. The clause can filter on a key name, or range of keys, and in
the case of indexed columns, on column values. Key filters are
specified using the KEY keyword or key alias name, a relational
operator (one of =, >, >=, <, and <=), and a term value. When terms
appear on both sides of a relational operator it is assumed the filter
applies to an indexed column. With column index filters, the term on
the left of the operator is the name, the term on the right is the
value to filter _on_.
Note: The greater-than and less-than operators (> and <) result in key
ranges that are inclusive of the terms. There is no supported notion of
"strictly" greater-than or less-than; these operators are merely
supported as aliases to >= and <=.
"""
def help_select_limit(self):
print """
SELECT: Limiting results
SELECT ... WHERE <clause> [LIMIT n] ...
Limiting the number of rows returned can be achieved by adding the
LIMIT option to a SELECT expression. LIMIT defaults to 10,000 when left
unset.
"""
class CQL2HelpTopics(CQLHelpTopics):
def help_create_keyspace(self):
print """
CREATE KEYSPACE <ksname> WITH strategy_class = '<strategy>'
[AND strategy_options:<option> = <val>];
The CREATE KEYSPACE statement creates a new top-level namespace (aka
"keyspace"). Valid names are any string constructed of alphanumeric
characters and underscores. Names which do not work as valid
identifiers or integers should be quoted as string literals. Properties
such as replication strategy and count are specified during creation
using the following accepted keyword arguments:
strategy_class [required]: The name of the replication strategy class
which should be used for the new keyspace. Some often-used classes
are SimpleStrategy and NetworkTopologyStrategy.
strategy_options: Most strategies require additional arguments which
can be supplied by appending the option name to "strategy_options",
separated by a colon (:). For example, a strategy option of "DC1"
with a value of "1" would be specified as "strategy_options:DC1 = 1".
The replication factor option for SimpleStrategy could be
"strategy_options:replication_factor=3".
"""
def help_begin(self):
print """
BEGIN BATCH [USING CONSISTENCY <level>
[AND TIMESTAMP <timestamp>]]
<insert or update or delete statement> ;
[ <another insert or update or delete statement ;
[...]]
APPLY BATCH;
BATCH supports setting a client-supplied optional global timestamp
which will be used for each of the operations included in the batch.
A single consistency level is used for the entire batch. It appears
after the BEGIN BATCH statement, and uses the standard "consistency
level specification" (see HELP CONSISTENCYLEVEL). Batched statements
default to CONSISTENCY.ONE when left unspecified.
Only data modification statements (specifically, UPDATE, INSERT,
and DELETE) are allowed in a BATCH statement. BATCH is _not_ an
analogue for SQL transactions.
_NOTE: While there are no isolation guarantees, UPDATE queries are
atomic within a given record._
"""
help_apply = help_begin
def help_select(self):
print """
SELECT [FIRST n] [REVERSED] <selectExpr>
FROM [<keyspace>.]<table>
[USING CONSISTENCY <consistencylevel>]
[WHERE <clause>]
[ORDER BY <colname> [DESC]]
[LIMIT m];
SELECT is used to read one or more records from a CQL table. It returns
a set of rows matching the selection criteria specified.
Note that FIRST and REVERSED are only supported in CQL 2, and ORDER BY
is only supported in CQL 3 and higher.
For more information, see one of the following:
HELP SELECT_EXPR
HELP SELECT_TABLE
HELP SELECT_WHERE
HELP SELECT_LIMIT
HELP CONSISTENCYLEVEL
"""
def help_delete(self):
print """
DELETE [<col1> [, <col2>, ...] FROM [<keyspace>.]<tablename>
[USING CONSISTENCY <consistencylevel>
[AND TIMESTAMP <timestamp>]]
WHERE <keyname> = <keyvalue>;
A DELETE is used to perform the removal of one or more columns from one
or more rows. Each DELETE statement requires a precise set of row keys
to be specified using a WHERE clause and the KEY keyword or key alias.
For more information, see one of the following:
HELP DELETE_USING
HELP DELETE_COLUMNS
HELP DELETE_WHERE
HELP CONSISTENCYLEVEL
"""
def help_delete_using(self):
print """
DELETE: the USING clause
DELETE ... USING CONSISTENCY <consistencylevel>;
DELETE ... USING TIMESTAMP <timestamp>;
The USING clause allows setting of certain query and data parameters.
If multiple parameters need to be set, these may be joined using AND.
Example:
DELETE ... CONSISTENCY LOCAL_QUORUM AND TIMESTAMP 1318452291034;
<timestamp> defines the optional timestamp for the new tombstone
record. It must be an integer. Cassandra timestamps are generally
specified using milliseconds since the Unix epoch (1970-01-01 00:00:00
UTC).
"""
def help_update(self):
print """
UPDATE [<keyspace>.]<columnFamily>
[USING CONSISTENCY <consistencylevel>
[AND TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]]
SET name1 = value1, name2 = value2 WHERE <keycol> = keyval;
An UPDATE is used to write one or more columns to a record in a table.
No results are returned. The record's primary key must be completely
and uniquely specified; that is, if the primary key includes multiple
columns, all must be explicitly given in the WHERE clause.
Statements begin with the UPDATE keyword followed by the name of the
table to be updated.
For more information, see one of the following:
HELP UPDATE_USING
HELP UPDATE_SET
HELP UPDATE_COUNTERS
HELP UPDATE_WHERE
HELP CONSISTENCYLEVEL
"""
def help_update_using(self):
print """
UPDATE: the USING clause
UPDATE ... USING TIMESTAMP <timestamp>;
UPDATE ... USING TTL <timeToLive>;
UPDATE ... USING CONSISTENCY <consistencylevel>;
The USING clause allows setting of certain query and data parameters.
If multiple parameters need to be set, these may be joined using AND.
Example:
UPDATE ... USING TTL 43200 AND CONSISTENCY LOCAL_QUORUM;
<timestamp> defines the optional timestamp for the new column value(s).
It must be an integer. Cassandra timestamps are generally specified
using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC).
<timeToLive> defines the optional time to live (TTL) in seconds for the
new column value(s). It must be an integer.
"""
def help_consistencylevel(self):
print """
Consistency Level Specification
... USING CONSISTENCY <consistencylevel> ...
Consistency level specifications are made up of keyword USING,
followed by a consistency level identifier. Valid consistency level
identifiers are as follows:
* ANY
* ONE (default)
* TWO
* THREE
* QUORUM
* ALL
* LOCAL_QUORUM
* EACH_QUORUM
For more information on how consistency levels work, consult your
Cassandra documentation.
"""
def help_insert(self):
print """
INSERT INTO [<keyspace>.]<tablename>
( <colname1>, <colname2> [, <colname3> [, ...]] )
VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] )
[USING CONSISTENCY <consistencylevel>
[AND TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]];
An INSERT is used to write one or more columns to a record in a
CQL table. No results are returned.
Values for all component columns in the table's primary key must
be given. Also, there must be at least one non-primary-key column
specified (Cassandra rows are not considered to exist with only
a key and no associated columns).
Unlike in SQL, the semantics of INSERT and UPDATE are identical.
In either case a record is created if none existed before, and
updated when it does. For more information, see one of the
following:
HELP UPDATE
HELP UPDATE_USING
HELP CONSISTENCYLEVEL
"""
def help_select_expr(self):
print """
SELECT: Specifying Columns
SELECT [FIRST n] [REVERSED] name1, name2, name3 FROM ...
SELECT [FIRST n] [REVERSED] name1..nameN FROM ...
SELECT COUNT(*) FROM ...
The SELECT expression determines which columns will appear in the
results and takes the form of either a comma separated list of names,
or a range. The range notation consists of a start and end column name
separated by two periods (..). The set of columns returned for a
range is start and end inclusive.
The FIRST option accepts an integer argument and can be used to apply a
limit to the number of columns returned per row. When this limit is
left unset, it defaults to 10,000 columns.
The REVERSED option causes the sort order of the results to be
reversed.
It is worth noting that unlike the projection in a SQL SELECT, there is
no guarantee that the results will contain all of the columns
specified. This is because Cassandra is schema-less and there are no
guarantees that a given column exists.
When the COUNT aggregate function is specified as a column to fetch, a
single row will be returned, with a single column named "count" whose
value is the number of rows from the pre-aggregation resultset.
Currently, COUNT is the only function supported by CQL.
** [FIRST n] and [REVERSED] are no longer supported in CQL 3.
"""
class CQL3HelpTopics(CQLHelpTopics):
def help_create_keyspace(self):
print """
CREATE KEYSPACE <ksname>
WITH replication = {'class':'<strategy>' [,'<option>':<val>]};
The CREATE KEYSPACE statement creates a new top-level namespace (aka
"keyspace"). Valid names are any string constructed of alphanumeric
characters and underscores. Names which do not work as valid
identifiers or integers should be quoted as string literals. Properties
such as replication strategy and count are specified during creation
as key-value pairs in the 'replication' map:
class [required]: The name of the replication strategy class
which should be used for the new keyspace. Some often-used classes
are SimpleStrategy and NetworkTopologyStrategy.
other options [optional]: Most strategies require additional arguments
which can be supplied as key-value pairs in the 'replication' map.
Examples:
To create a keyspace with NetworkTopologyStrategy and strategy option of "DC1"
with a value of "1" and "DC2" with a value of "2" you would use
the following statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'NetworkTopologyStrategy', 'DC1':1, 'DC2':2};
To create a keyspace with SimpleStrategy and "replication_factor" option
with a value of "3" you would use this statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};
"""
def help_begin(self):
print """
BEGIN [UNLOGGED|COUNTER] BATCH [USING TIMESTAMP <timestamp>]
<insert or update or delete statement> ;
[ <another insert or update or delete statement ;
[...]]
APPLY BATCH;
BATCH supports setting a client-supplied optional global timestamp
which will be used for each of the operations included in the batch.
Only data modification statements (specifically, UPDATE, INSERT,
and DELETE) are allowed in a BATCH statement. BATCH is _not_ an
analogue for SQL transactions.
_NOTE: Counter mutations are allowed only within COUNTER batches._
_NOTE: While there are no isolation guarantees, UPDATE queries are
atomic within a given record._
"""
help_apply = help_begin
def help_select(self):
print """
SELECT <selectExpr>
FROM [<keyspace>.]<table>
[WHERE <clause>]
[ORDER BY <colname> [DESC]]
[LIMIT m];
SELECT is used to read one or more records from a CQL table. It returns
a set of rows matching the selection criteria specified.
For more information, see one of the following:
HELP SELECT_EXPR
HELP SELECT_TABLE
HELP SELECT_WHERE
HELP SELECT_LIMIT
"""
def help_delete(self):
print """
DELETE [<col1> [, <col2>, ...] FROM [<keyspace>.]<tablename>
[USING TIMESTAMP <timestamp>]
WHERE <keyname> = <keyvalue>;
A DELETE is used to perform the removal of one or more columns from one
or more rows. Each DELETE statement requires a precise set of row keys
to be specified using a WHERE clause and the KEY keyword or key alias.
For more information, see one of the following:
HELP DELETE_USING
HELP DELETE_COLUMNS
HELP DELETE_WHERE
"""
def help_delete_using(self):
print """
DELETE: the USING clause
DELETE ... USING TIMESTAMP <timestamp>;
<timestamp> defines the optional timestamp for the new tombstone
record. It must be an integer. Cassandra timestamps are generally
specified using milliseconds since the Unix epoch (1970-01-01 00:00:00
UTC).
"""
def help_update(self):
print """
UPDATE [<keyspace>.]<columnFamily>
[USING [TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]]
SET name1 = value1, name2 = value2 WHERE <keycol> = keyval;
An UPDATE is used to write one or more columns to a record in a table.
No results are returned. The record's primary key must be completely
and uniquely specified; that is, if the primary key includes multiple
columns, all must be explicitly given in the WHERE clause.
Statements begin with the UPDATE keyword followed by the name of the
table to be updated.
For more information, see one of the following:
HELP UPDATE_USING
HELP UPDATE_SET
HELP UPDATE_COUNTERS
HELP UPDATE_WHERE
"""
def help_update_using(self):
print """
UPDATE: the USING clause
UPDATE ... USING TIMESTAMP <timestamp>;
UPDATE ... USING TTL <timeToLive>;
The USING clause allows setting of certain query and data parameters.
If multiple parameters need to be set, these may be joined using AND.
Example:
UPDATE ... USING TTL 43200 AND TIMESTAMP 1351620509603
<timestamp> defines the optional timestamp for the new column value(s).
It must be an integer. Cassandra timestamps are generally specified
using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC).
<timeToLive> defines the optional time to live (TTL) in seconds for the
new column value(s). It must be an integer.
"""
def help_insert(self):
print """
INSERT INTO [<keyspace>.]<tablename>
( <colname1>, <colname2> [, <colname3> [, ...]] )
VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] )
[USING TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]];
An INSERT is used to write one or more columns to a record in a
CQL table. No results are returned.
Values for all component columns in the table's primary key must
be given. Also, there must be at least one non-primary-key column
specified (Cassandra rows are not considered to exist with only
a key and no associated columns).
Unlike in SQL, the semantics of INSERT and UPDATE are identical.
In either case a record is created if none existed before, and
updated when it does. For more information, see one of the
following:
HELP UPDATE
HELP UPDATE_USING
"""
def help_select_expr(self):
print """
SELECT: Specifying Columns
SELECT name1, name2, name3 FROM ...
SELECT COUNT(*) FROM ...
The SELECT expression determines which columns will appear in the
results and takes the form of a comma separated list of names.
It is worth noting that unlike the projection in a SQL SELECT, there is
no guarantee that the results will contain all of the columns
specified. This is because Cassandra is schema-less and there are no
guarantees that a given column exists.
When the COUNT aggregate function is specified as a column to fetch, a
single row will be returned, with a single column named "count" whose
value is the number of rows from the pre-aggregation resultset.
Currently, COUNT is the only function supported by CQL.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Dropped columns will immediately
become unavailable in the queries and will not be included in
compacted sstables in the future. If a column is readded, queries
won't return values written before the column was last dropped.
It is assumed that timestamps represent actual time, so if this
is not your case, you should NOT readd previously dropped columns.
Columns can't be dropped from tables defined with COMPACT STORAGE.
"""
def help_create(self):
super(CQL3HelpTopics, self).help_create()
print " HELP CREATE_USER;"
def help_alter(self):
super(CQL3HelpTopics, self).help_alter()
print " HELP ALTER_USER;"
def help_drop(self):
super(CQL3HelpTopics, self).help_drop()
print " HELP DROP_USER;"
def help_list(self):
print """
There are different variants of LIST. For more information, see
one of the following:
HELP LIST_USERS;
HELP LIST_PERMISSIONS;
"""
def help_create_user(self):
print """
CREATE USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
CREATE USER creates a new Cassandra user account.
Only superusers can issue CREATE USER requests.
To create a superuser account, use the SUPERUSER option (NOSUPERUSER is
the default).
The WITH PASSWORD clause should only be used with password-based
authenticators, e.g. PasswordAuthenticator, SimpleAuthenticator.
"""
def help_alter_user(self):
print """
ALTER USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
Use ALTER USER to change a user's superuser status and/or password (only
with password-based authenticators).
Superusers can change a user's password or superuser status (except their own).
Users cannot change their own superuser status. Ordinary users can only change their
password (if the configured authenticator is password-based).
"""
def help_drop_user(self):
print """
DROP USER <username>;
DROP USER removes an existing user. You have to be logged in as a superuser
to issue a DROP USER statement. A user cannot drop themselves.
"""
def help_list_users(self):
print """
LIST USERS;
List existing users and their superuser status.
"""
def help_grant(self):
print """
GRANT (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
TO <username>
Grant the specified permission (or all permissions) on a resource
to a user.
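Example:
GRANT SELECT ON KEYSPACE ks1 TO jsmith;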
To be able to grant a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_revoke(self):
print """
REVOKE (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
FROM <username>
Revokes the specified permission (or all permissions) on a resource
from a user.
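Example:
REVOKE SELECT ON KEYSPACE ks1 FROM jsmith;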
To be able to revoke a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_list_permissions(self):
print """
LIST (<permission> [PERMISSION] | ALL [PERMISSIONS])
[ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>]
[OF <username>]
[NORECURSIVE]
Omitting ON <resource> part will list permissions on ALL KEYSPACES,
every keyspace and table.
Omitting OF <username> part will list permissions of all users.
Omitting NORECURSIVE specifier will list permissions of the resource
and all its parents (table, table's keyspace and ALL KEYSPACES).
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_permissions(self):
print """
PERMISSIONS
Cassandra has 6 permissions:
ALTER: required for ALTER KEYSPACE, ALTER TABLE, CREATE INDEX, DROP INDEX
AUTHORIZE: required for GRANT, REVOKE
CREATE: required for CREATE KEYSPACE, CREATE TABLE
DROP: required for DROP KEYSPACE, DROP TABLE
MODIFY: required for INSERT, DELETE, UPDATE, TRUNCATE
SELECT: required for SELECT
"""
|
|
#!/usr/bin/env python3
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code generator for Vulkan function pointers."""
import filecmp
import optparse
import os
import platform
import sys
from os import path
from string import Template
from subprocess import call
vulkan_reg_path = path.join(path.dirname(__file__), "..", "..", "third_party",
"vulkan-deps", "vulkan-headers", "src", "registry")
sys.path.append(vulkan_reg_path)
from reg import Registry
registry = Registry()
registry.loadFile(open(path.join(vulkan_reg_path, "vk.xml")))
VULKAN_REQUIRED_API_VERSION = 'VK_API_VERSION_1_1'
VULKAN_UNASSOCIATED_FUNCTIONS = [
{
'functions': [
# vkGetInstanceProcAddr belongs here but is handled specially.
'vkEnumerateInstanceVersion',
'vkCreateInstance',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateInstanceLayerProperties',
]
}
]
VULKAN_INSTANCE_FUNCTIONS = [
{
'functions': [
'vkCreateDevice',
'vkDestroyInstance',
'vkEnumerateDeviceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumeratePhysicalDevices',
'vkGetDeviceProcAddr',
'vkGetPhysicalDeviceFeatures2',
'vkGetPhysicalDeviceFormatProperties',
'vkGetPhysicalDeviceFormatProperties2',
'vkGetPhysicalDeviceImageFormatProperties2',
'vkGetPhysicalDeviceMemoryProperties',
'vkGetPhysicalDeviceMemoryProperties2',
'vkGetPhysicalDeviceProperties',
'vkGetPhysicalDeviceProperties2',
'vkGetPhysicalDeviceQueueFamilyProperties',
]
},
{
'ifdef': 'DCHECK_IS_ON()',
'extension': 'VK_EXT_DEBUG_REPORT_EXTENSION_NAME',
'functions': [
'vkCreateDebugReportCallbackEXT',
'vkDestroyDebugReportCallbackEXT',
]
},
{
'extension': 'VK_KHR_SURFACE_EXTENSION_NAME',
'functions': [
'vkDestroySurfaceKHR',
'vkGetPhysicalDeviceSurfaceCapabilitiesKHR',
'vkGetPhysicalDeviceSurfaceFormatsKHR',
'vkGetPhysicalDeviceSurfaceSupportKHR',
]
},
{
'ifdef': 'defined(USE_VULKAN_XCB)',
'extension': 'VK_KHR_XCB_SURFACE_EXTENSION_NAME',
'functions': [
'vkCreateXcbSurfaceKHR',
'vkGetPhysicalDeviceXcbPresentationSupportKHR',
]
},
{
'ifdef': 'defined(OS_WIN)',
'extension': 'VK_KHR_WIN32_SURFACE_EXTENSION_NAME',
'functions': [
'vkCreateWin32SurfaceKHR',
'vkGetPhysicalDeviceWin32PresentationSupportKHR',
]
},
{
'ifdef': 'defined(OS_ANDROID)',
'extension': 'VK_KHR_ANDROID_SURFACE_EXTENSION_NAME',
'functions': [
'vkCreateAndroidSurfaceKHR',
]
},
{
'ifdef': 'defined(OS_FUCHSIA)',
'extension': 'VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME',
'functions': [
'vkCreateImagePipeSurfaceFUCHSIA',
]
},
]
VULKAN_DEVICE_FUNCTIONS = [
{
'functions': [
'vkAllocateCommandBuffers',
'vkAllocateDescriptorSets',
'vkAllocateMemory',
'vkBeginCommandBuffer',
'vkBindBufferMemory',
'vkBindBufferMemory2',
'vkBindImageMemory',
'vkBindImageMemory2',
'vkCmdBeginRenderPass',
'vkCmdCopyBuffer',
'vkCmdCopyBufferToImage',
'vkCmdCopyImageToBuffer',
'vkCmdEndRenderPass',
'vkCmdExecuteCommands',
'vkCmdNextSubpass',
'vkCmdPipelineBarrier',
'vkCreateBuffer',
'vkCreateCommandPool',
'vkCreateDescriptorPool',
'vkCreateDescriptorSetLayout',
'vkCreateFence',
'vkCreateFramebuffer',
'vkCreateGraphicsPipelines',
'vkCreateImage',
'vkCreateImageView',
'vkCreateRenderPass',
'vkCreateSampler',
'vkCreateSemaphore',
'vkCreateShaderModule',
'vkDestroyBuffer',
'vkDestroyCommandPool',
'vkDestroyDescriptorPool',
'vkDestroyDescriptorSetLayout',
'vkDestroyDevice',
'vkDestroyFence',
'vkDestroyFramebuffer',
'vkDestroyImage',
'vkDestroyImageView',
'vkDestroyRenderPass',
'vkDestroySampler',
'vkDestroySemaphore',
'vkDestroyShaderModule',
'vkDeviceWaitIdle',
'vkFlushMappedMemoryRanges',
'vkEndCommandBuffer',
'vkFreeCommandBuffers',
'vkFreeDescriptorSets',
'vkFreeMemory',
'vkInvalidateMappedMemoryRanges',
'vkGetBufferMemoryRequirements',
'vkGetBufferMemoryRequirements2',
'vkGetDeviceQueue',
'vkGetDeviceQueue2',
'vkGetFenceStatus',
'vkGetImageMemoryRequirements',
'vkGetImageMemoryRequirements2',
'vkGetImageSubresourceLayout',
'vkMapMemory',
'vkQueueSubmit',
'vkQueueWaitIdle',
'vkResetCommandBuffer',
'vkResetFences',
'vkUnmapMemory',
'vkUpdateDescriptorSets',
'vkWaitForFences',
]
},
{
'ifdef': 'defined(OS_ANDROID)',
'extension':
'VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME',
'functions': [
'vkGetAndroidHardwareBufferPropertiesANDROID',
]
},
{
'ifdef': 'defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)',
'extension': 'VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME',
'functions': [
'vkGetSemaphoreFdKHR',
'vkImportSemaphoreFdKHR',
]
},
{
'ifdef': 'defined(OS_WIN)',
'extension': 'VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME',
'functions': [
'vkGetSemaphoreWin32HandleKHR',
'vkImportSemaphoreWin32HandleKHR',
]
},
{
'ifdef': 'defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID)',
'extension': 'VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME',
'functions': [
'vkGetMemoryFdKHR',
'vkGetMemoryFdPropertiesKHR',
]
},
{
'ifdef': 'defined(OS_WIN)',
'extension': 'VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME',
'functions': [
'vkGetMemoryWin32HandleKHR',
'vkGetMemoryWin32HandlePropertiesKHR',
]
},
{
'ifdef': 'defined(OS_FUCHSIA)',
'extension': 'VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME',
'functions': [
'vkImportSemaphoreZirconHandleFUCHSIA',
'vkGetSemaphoreZirconHandleFUCHSIA',
]
},
{
'ifdef': 'defined(OS_FUCHSIA)',
'extension': 'VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME',
'functions': [
'vkGetMemoryZirconHandleFUCHSIA',
]
},
{
'ifdef': 'defined(OS_FUCHSIA)',
'extension': 'VK_FUCHSIA_BUFFER_COLLECTION_X_EXTENSION_NAME',
'functions': [
'vkCreateBufferCollectionFUCHSIAX',
'vkSetBufferCollectionConstraintsFUCHSIAX',
'vkGetBufferCollectionPropertiesFUCHSIAX',
'vkDestroyBufferCollectionFUCHSIAX',
]
},
{
'extension': 'VK_KHR_SWAPCHAIN_EXTENSION_NAME',
'functions': [
'vkAcquireNextImageKHR',
'vkCreateSwapchainKHR',
'vkDestroySwapchainKHR',
'vkGetSwapchainImagesKHR',
'vkQueuePresentKHR',
]
},
{
'ifdef': 'defined(OS_LINUX) || defined(OS_CHROMEOS)',
'extension': 'VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME',
'functions': [
'vkGetImageDrmFormatModifierPropertiesEXT',
]
}
]
SELF_LOCATION = os.path.dirname(os.path.abspath(__file__))
LICENSE_AND_HEADER = """\
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file is auto-generated from
// gpu/vulkan/generate_bindings.py
// It's formatted by clang-format using chromium coding style:
// clang-format -i -style=chromium filename
// DO NOT EDIT!
"""
def WriteFunctionsInternal(out_file, functions, gen_content,
check_extension=False):
for group in functions:
if 'ifdef' in group:
out_file.write('#if %s\n' % group['ifdef'])
extension = group['extension'] if 'extension' in group else ''
min_api_version = \
group['min_api_version'] if 'min_api_version' in group else ''
if not check_extension:
for func in group['functions']:
out_file.write(gen_content(func))
elif not extension and not min_api_version:
for func in group['functions']:
out_file.write(gen_content(func))
else:
if min_api_version:
out_file.write(' if (api_version >= %s) {\n' % min_api_version)
for func in group['functions']:
out_file.write(
gen_content(func))
out_file.write('}\n')
if extension:
out_file.write('else ')
if extension:
out_file.write('if (gfx::HasExtension(enabled_extensions, %s)) {\n' %
extension)
extension_suffix = \
group['extension_suffix'] if 'extension_suffix' in group \
else ''
for func in group['functions']:
out_file.write(gen_content(func, extension_suffix))
out_file.write('}\n')
if 'ifdef' in group:
out_file.write('#endif // %s\n' % group['ifdef'])
out_file.write('\n')
def WriteFunctions(out_file, functions, template, check_extension=False):
def gen_content(func, suffix=''):
return template.substitute({'name': func,'extension_suffix': suffix})
WriteFunctionsInternal(out_file, functions, gen_content, check_extension)
def WriteFunctionDeclarations(out_file, functions):
template = Template(' VulkanFunction<PFN_${name}> ${name};\n')
WriteFunctions(out_file, functions, template)
def WriteMacros(out_file, functions):
def gen_content(func, suffix=''):
if func not in registry.cmddict:
# Some fuchsia functions are not in the vulkan registry, so use a macro
# for them.
template = Template(
'#define $name gpu::GetVulkanFunctionPointers()->${name}\n')
return template.substitute({'name': func, 'extension_suffix' : suffix})
none_str = lambda s: s if s else ''
cmd = registry.cmddict[func].elem
proto = cmd.find('proto')
params = cmd.findall('param')
pdecl = none_str(proto.text)
for elem in proto:
text = none_str(elem.text)
tail = none_str(elem.tail)
pdecl += text + tail
n = len(params)
callstat = 'return gpu::GetVulkanFunctionPointers()->%s(' % func
paramdecl = '('
if n > 0:
paramnames = (''.join(t for t in p.itertext())
for p in params)
paramdecl += ', '.join(paramnames)
paramnames = (''.join(p[1].text)
for p in params)
callstat += ', '.join(paramnames)
else:
paramdecl += 'void'
paramdecl += ')'
callstat += ')'
pdecl += paramdecl
return 'ALWAYS_INLINE %s { %s; }\n' % (pdecl, callstat)
WriteFunctionsInternal(out_file, functions, gen_content)
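# Illustrative sketch of the generated output (not part of this script):
# a registry-backed function such as vkDeviceWaitIdle expands to
#   ALWAYS_INLINE VkResult vkDeviceWaitIdle(VkDevice device) {
#     return gpu::GetVulkanFunctionPointers()->vkDeviceWaitIdle(device); }
# while a function missing from vk.xml falls back to
#   #define name gpu::GetVulkanFunctionPointers()->name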
def GenerateHeaderFile(out_file):
"""Generates gpu/vulkan/vulkan_function_pointers.h"""
out_file.write(LICENSE_AND_HEADER +
"""
#ifndef GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
#define GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
#include <vulkan/vulkan.h>
#include "base/compiler_specific.h"
#include "base/component_export.h"
#include "base/native_library.h"
#include "build/build_config.h"
#include "ui/gfx/extension_set.h"
#if defined(OS_ANDROID)
#include <vulkan/vulkan_android.h>
#endif
#if defined(OS_FUCHSIA)
#include <zircon/types.h>
// <vulkan/vulkan_fuchsia.h> must be included after <zircon/types.h>
#include <vulkan/vulkan_fuchsia.h>
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#endif
#if defined(USE_VULKAN_XCB)
#include <xcb/xcb.h>
// <vulkan/vulkan_xcb.h> must be included after <xcb/xcb.h>
#include <vulkan/vulkan_xcb.h>
#endif
#if defined(OS_WIN)
#include <vulkan/vulkan_win32.h>
#endif
namespace gpu {
struct VulkanFunctionPointers;
constexpr uint32_t kVulkanRequiredApiVersion = %s;
COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers* GetVulkanFunctionPointers();
struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
VulkanFunctionPointers();
~VulkanFunctionPointers();
bool BindUnassociatedFunctionPointers(
PFN_vkGetInstanceProcAddr proc = nullptr);
// These functions assume that vkGetInstanceProcAddr has been populated.
bool BindInstanceFunctionPointers(
VkInstance vk_instance,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions);
// These functions assume that vkGetDeviceProcAddr has been populated.
bool BindDeviceFunctionPointers(
VkDevice vk_device,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions);
base::NativeLibrary vulkan_loader_library = nullptr;
template<typename T>
class VulkanFunction;
template <typename R, typename ...Args>
class VulkanFunction <R(VKAPI_PTR*)(Args...)> {
public:
using Fn = R(VKAPI_PTR*)(Args...);
explicit operator bool() const {
return !!fn_;
}
NO_SANITIZE("cfi-icall")
R operator()(Args... args) const {
return fn_(args...);
}
Fn get() const { return fn_; }
private:
friend VulkanFunctionPointers;
Fn operator=(Fn fn) {
fn_ = fn;
return fn_;
}
Fn fn_ = nullptr;
};
// Unassociated functions
VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddr;
""" % VULKAN_REQUIRED_API_VERSION)
WriteFunctionDeclarations(out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
out_file.write("""\
// Instance functions
""")
WriteFunctionDeclarations(out_file, VULKAN_INSTANCE_FUNCTIONS)
out_file.write("""\
// Device functions
""")
WriteFunctionDeclarations(out_file, VULKAN_DEVICE_FUNCTIONS)
out_file.write("""\
};
} // namespace gpu
// Unassociated functions
""")
WriteMacros(out_file, [{'functions': [ 'vkGetInstanceProcAddr']}])
WriteMacros(out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
out_file.write("""\
// Instance functions
""")
WriteMacros(out_file, VULKAN_INSTANCE_FUNCTIONS)
out_file.write("""\
// Device functions
""")
WriteMacros(out_file, VULKAN_DEVICE_FUNCTIONS)
out_file.write("""\
#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_""")
def WriteFunctionPointerInitialization(out_file, proc_addr_function, parent,
functions):
template = Template(""" ${name} = reinterpret_cast<PFN_${name}>(
${get_proc_addr}(${parent}, "${name}${extension_suffix}"));
if (!${name}) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "${name}${extension_suffix}";
return false;
}
""")
# Substitute all values in the template, except name, which is processed in
# WriteFunctions().
template = Template(template.substitute({
'name': '${name}', 'extension_suffix': '${extension_suffix}',
'get_proc_addr': proc_addr_function, 'parent': parent}))
WriteFunctions(out_file, functions, template, check_extension=True)
def WriteUnassociatedFunctionPointerInitialization(out_file, functions):
WriteFunctionPointerInitialization(out_file, 'vkGetInstanceProcAddr',
'nullptr', functions)
def WriteInstanceFunctionPointerInitialization(out_file, functions):
WriteFunctionPointerInitialization(out_file, 'vkGetInstanceProcAddr',
'vk_instance', functions)
def WriteDeviceFunctionPointerInitialization(out_file, functions):
WriteFunctionPointerInitialization(out_file, 'vkGetDeviceProcAddr',
'vk_device', functions)
def GenerateSourceFile(out_file):
"""Generates gpu/vulkan/vulkan_function_pointers.cc"""
out_file.write(LICENSE_AND_HEADER +
"""
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "base/logging.h"
#include "base/no_destructor.h"
namespace gpu {
VulkanFunctionPointers* GetVulkanFunctionPointers() {
static base::NoDestructor<VulkanFunctionPointers> vulkan_function_pointers;
return vulkan_function_pointers.get();
}
VulkanFunctionPointers::VulkanFunctionPointers() = default;
VulkanFunctionPointers::~VulkanFunctionPointers() = default;
bool VulkanFunctionPointers::BindUnassociatedFunctionPointers(
PFN_vkGetInstanceProcAddr proc) {
if (proc) {
DCHECK(!vulkan_loader_library);
vkGetInstanceProcAddr = proc;
} else {
// vkGetInstanceProcAddr must be handled specially since it gets its
// function pointer through base::GetFunctionPointerFromNativeLibrary().
// Other Vulkan functions don't do this.
vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library,
"vkGetInstanceProcAddr"));
if (!vkGetInstanceProcAddr)
return false;
}
""")
WriteUnassociatedFunctionPointerInitialization(
out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
out_file.write("""\
return true;
}
bool VulkanFunctionPointers::BindInstanceFunctionPointers(
VkInstance vk_instance,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
DCHECK_GE(api_version, kVulkanRequiredApiVersion);
""")
WriteInstanceFunctionPointerInitialization(
out_file, VULKAN_INSTANCE_FUNCTIONS)
out_file.write("""\
return true;
}
bool VulkanFunctionPointers::BindDeviceFunctionPointers(
VkDevice vk_device,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
DCHECK_GE(api_version, kVulkanRequiredApiVersion);
// Device functions
""")
WriteDeviceFunctionPointerInitialization(out_file, VULKAN_DEVICE_FUNCTIONS)
out_file.write("""\
return true;
}
} // namespace gpu
""")
def main(argv):
"""This is the main function."""
parser = optparse.OptionParser()
parser.add_option(
"--output-dir",
help="Output directory for generated files. Defaults to this script's "
"directory.")
parser.add_option(
"-c", "--check", action="store_true",
help="Check if output files match generated files in chromium root "
"directory. Use this in PRESUBMIT scripts with --output-dir.")
(options, _) = parser.parse_args(args=argv)
# Support generating files for PRESUBMIT.
if options.output_dir:
output_dir = options.output_dir
else:
output_dir = SELF_LOCATION
def ClangFormat(filename):
formatter = "clang-format"
if platform.system() == "Windows":
formatter += ".bat"
call([formatter, "-i", "-style=chromium", filename])
header_file_name = 'vulkan_function_pointers.h'
header_file = open(
os.path.join(output_dir, header_file_name), 'w')
GenerateHeaderFile(header_file)
header_file.close()
ClangFormat(header_file.name)
source_file_name = 'vulkan_function_pointers.cc'
source_file = open(
os.path.join(output_dir, source_file_name), 'w')
GenerateSourceFile(source_file)
source_file.close()
ClangFormat(source_file.name)
check_failed_filenames = []
if options.check:
for filename in [header_file_name, source_file_name]:
if not filecmp.cmp(os.path.join(output_dir, filename),
os.path.join(SELF_LOCATION, filename)):
check_failed_filenames.append(filename)
if len(check_failed_filenames) > 0:
print('Please run gpu/vulkan/generate_bindings.py')
print('Failed check on generated files:')
for filename in check_failed_filenames:
print(filename)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
"""Dummy module to create ports to occupy low ip addresses"""
#!/usr/bin/env python
import pyrax
from ansible.module_utils.basic import *
uri_ports = 'https://dfw.networks.api.rackspacecloud.com/v2.0/ports'
uri_networks = 'https://dfw.networks.api.rackspacecloud.com/v2.0/networks'
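# Example task (illustrative; the module name is hypothetical and depends
# on the filename under which this file is installed in the library path):
#
# - name: Reserve low IP addresses on a cloud network
#   rax_reserve_port:
#     region: DFW
#     network_label: my-net
#     ip_list: ['192.168.3.2', '192.168.3.3']
#     state: present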
def get_port(cnw, network_id, ip_list):
try:
result, ports = cnw.identity.method_get(uri_ports)
if result.status_code == 200:
ports_on_network = filter(lambda port: port['network_id'] == \
network_id, ports['ports'])
for port in ports_on_network:
for ip in port['fixed_ips']:
if ip['ip_address'] in ip_list:
return result.status_code, port
return 404, {'status': 404, 'message': 'port not found'}
except Exception as e:
return 'ERROR', {'status': 'ERROR', 'message': e.message}
def get_subnet_id(cnw, network_id):
"""There will only be a single subnet"""
result, networks = cnw.identity.method_get(uri_networks)
if result.status_code == 200:
network = filter(lambda net: net['id'] == network_id,
networks['networks'])
subnet_id = network[0]['subnets'][0]
return subnet_id
return None
def port_present(data):
"""Means port is a part of the subnet and has the ip
"""
port_name = data['name']
network_id = data['network_id']
network_label = data['network_label']
if network_id and network_label:
return True, False, {'status': 'ERROR',
'data': 'Specify either network_id'
' or network_label, not both'}
ip_list = data['ip_list'] if data['ip_list'] else []
cnw = pyrax.cloud_networks
data_json = {
'port': {}
}
if port_name:
data_json['port'].update({'name': port_name})
if network_label:
network = get_network_by_name(cnw, network_label)
network_id = network['id']
if network_id:
data_json['port'].update({'network_id': network_id})
subnet_id = get_subnet_id(cnw, network_id)
if ip_list:
data_json['port'].update({
'fixed_ips' : [
{'subnet_id': subnet_id, 'ip_address': ip} for ip in ip_list
]
})
try:
result, port = cnw.identity.method_post(uri_ports, data=data_json)
if result.status_code == 201:
return False, True, result.json()['port']
elif result.status_code == 422:
return False, False, result.json()
else:
return True, False, {'status': result.status_code, 'data':
result.json()}
except pyrax.exceptions.ClientException as e:
if e.code == 409:
# already exists, query it and return port object
if ip_list:
status_code, port = get_port(cnw, network_id, ip_list)
if status_code == 200:
return False, False, {'status': 'OK',
'data': 'Port with IP'
' already exists',
'port': port}
elif status_code == 404:
# Although the IP is technically not there
# quark will not allow to assign it for a while
# Return an error asking to wait 10 min.
return True, False, {'status': 'ERROR',
'data': 'IP is reserved by quark.'
' Wait 10 min and retry'}
else:
return True, False, {'status': 'ERROR',
'data': 'port not found; state undetermined'}
elif port_name:
port = get_port_by_name(cnw, network_id, port_name)
if port:
return False, False, {'status': 'OK',
'data': port,
'msg': 'Port by this name '
'already exists'}
else:
return True, False, 'Port not found'
else:
# The IP may still be reserved by quark; treat the conflict as success.
return False, False, port
except Exception as e:
return True, False, {'status': 'ERROR', 'data': e.message}
def get_port_by_name(cnw, network_id, name):
try:
result, ports = cnw.identity.method_get(uri_ports)
if result.status_code == 200:
if ports:
ports_on_network = filter(lambda port: port['network_id'] == \
network_id, ports['ports'])
ports_with_name = filter(lambda port: port['name'] == name,
ports_on_network)
return ports_with_name[0] if ports_with_name else None
else:
return None
else:
return None
except Exception as e:
return None
def get_port_by_ip(cnw, network_id, ip_list):
try:
result, ports = cnw.identity.method_get(uri_ports)
if result.status_code == 200:
ports_on_network = filter(lambda port: port['network_id'] == \
network_id, ports['ports'])
for port in ports_on_network:
for ip in port['fixed_ips']:
if ip['ip_address'] in ip_list:
return port
else:
return None
except Exception as e:
return None
def delete_port_by_id(cnw, port_id):
try:
result, port = cnw.identity.method_delete(uri_ports + '/' + port_id)
if result.status_code == 204:
return False, True, {'status': 'SUCCESS'}
elif result.status_code == 404:
return False, False, {'status': result.status_code, 'data':
result.json()}
else:
return True, False, {'status': result.status_code, 'data':
result.json()}
except Exception as e:
return True, False, {'status': 'ERROR', 'data': e.message}
def get_network_by_name(cnw, network_label):
try:
result, networks = cnw.identity.method_get(uri_networks)
if result.status_code == 200:
network_list = filter(lambda net: net['name'] == network_label,
networks['networks'])
return network_list[0]
except Exception as e:
return None
def port_absent(data=None):
cnw = pyrax.cloud_networks
network_id = data['network_id']
port_id = data['port_id']
port_name = data['name']
ip_list = data['ip_list']
network_label = data['network_label']
if not network_id and network_label:
network = get_network_by_name(cnw, network_label)
if network:
network_id = network['id']
else:
return True, False, {'status': 'ERROR',
'data': 'did not find the network label {}'
.format(network_label)}
if port_id:
return delete_port_by_id(cnw, port_id)
if port_name:
port = get_port_by_name(cnw, network_id, port_name)
if port:
port_id = port['id']
return delete_port_by_id(cnw, port_id)
else:
return False, False, {'status': 'OK', 'data': 'Port name does not'
' exist'}
if ip_list:
port = get_port_by_ip(cnw, network_id, ip_list)
if port:
port_id = port['id']
return delete_port_by_id(cnw, port_id)
else:
return False, False, {'status': 'OK',
'data': 'IP not found on any port. Hope it'
' is expected'}
# if we are here we have an error
return True, False, {'status': 'ERROR', 'data': 'undetermined error'}
def main():
fields = {
'ip_list': {'required': False, 'type': 'list'},
'network_id': {'required': False, 'type': 'str'},
'network_label': {'required': False, 'type': 'str'},
'port_id': {'required': False, 'type': 'str'},
'name': {'required': False, 'type': 'str'},
'description': {'required': False, 'type': 'str'},
'region': {'required': True, 'type': 'str'},
'state': {
'default': 'present',
'choices': ['present', 'absent'],
'type': 'str'
}
}
choice_map = {
'present': port_present,
'absent': port_absent
}
module = AnsibleModule(argument_spec=fields)
pyrax.set_setting('identity_type', 'rackspace')
pyrax.set_credential_file('rax.py')
pyrax.set_setting('region', module.params['region'])
is_error, has_changed, result = \
choice_map.get(module.params['state'])(module.params)
if not is_error:
module.exit_json(changed=has_changed, port=result)
else:
module.fail_json(msg='Error', port=result)
if __name__ == '__main__':
main()
|
|
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.contrib.messages import info
from django.core.urlresolvers import get_callable, reverse
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.template.loader import get_template
from django.utils import simplejson
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from mezzanine.conf import settings
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.views import render, set_cookie, paginate
from cartridge.shop import checkout
from cartridge.shop.forms import AddProductForm, DiscountForm, CartItemFormSet
from cartridge.shop.models import Product, ProductVariation, Order, OrderItem
from cartridge.shop.models import DiscountCode
from cartridge.shop.utils import recalculate_discount, sign
# Set up checkout handlers.
handler = lambda s: import_dotted_path(s) if s else lambda *args: None
billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING)
payment_handler = handler(settings.SHOP_HANDLER_PAYMENT)
order_handler = handler(settings.SHOP_HANDLER_ORDER)
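# Each SHOP_HANDLER_* setting is a dotted path to a callable, e.g. a
# hypothetical "myproject.checkout.payment_handler"; when a setting is
# empty, the lambda above substitutes a no-op handler.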
def product(request, slug, template="shop/product.html"):
"""
Display a product - convert the product variations to JSON as well as
handling adding the product to either the cart or the wishlist.
"""
published_products = Product.objects.published(for_user=request.user)
product = get_object_or_404(published_products, slug=slug)
fields = [f.name for f in ProductVariation.option_fields()]
variations = product.variations.all()
variations_json = simplejson.dumps([dict([(f, getattr(v, f))
for f in fields + ["sku", "image_id"]])
for v in variations])
to_cart = (request.method == "POST" and
request.POST.get("add_wishlist") is None)
initial_data = {}
if variations:
initial_data = dict([(f, getattr(variations[0], f)) for f in fields])
initial_data["quantity"] = 1
add_product_form = AddProductForm(request.POST or None, product=product,
initial=initial_data, to_cart=to_cart)
if request.method == "POST":
if add_product_form.is_valid():
if to_cart:
quantity = add_product_form.cleaned_data["quantity"]
request.cart.add_item(add_product_form.variation, quantity)
recalculate_discount(request)
info(request, _("Item added to cart"))
return redirect("shop_cart")
else:
skus = request.wishlist
sku = add_product_form.variation.sku
if sku not in skus:
skus.append(sku)
info(request, _("Item added to wishlist"))
response = redirect("shop_wishlist")
set_cookie(response, "wishlist", ",".join(skus))
return response
context = {
"product": product,
"editable_obj": product,
"images": product.images.all(),
"variations": variations,
"variations_json": variations_json,
"has_available_variations": any([v.has_price() for v in variations]),
"related_products": product.related_products.published(
for_user=request.user),
"add_product_form": add_product_form
}
return render(request, template, context)
@never_cache
def wishlist(request, template="shop/wishlist.html"):
"""
Display the wishlist and handle removing items from the wishlist and
adding them to the cart.
"""
skus = request.wishlist
error = None
if request.method == "POST":
to_cart = request.POST.get("add_cart")
add_product_form = AddProductForm(request.POST or None,
to_cart=to_cart)
if to_cart:
if add_product_form.is_valid():
request.cart.add_item(add_product_form.variation, 1)
recalculate_discount(request)
message = _("Item added to cart")
url = "shop_cart"
else:
error = add_product_form.errors.values()[0]
else:
message = _("Item removed from wishlist")
url = "shop_wishlist"
sku = request.POST.get("sku")
if sku in skus:
skus.remove(sku)
if not error:
info(request, message)
response = redirect(url)
set_cookie(response, "wishlist", ",".join(skus))
return response
# Remove skus from the cookie that no longer exist.
published_products = Product.objects.published(for_user=request.user)
f = {"product__in": published_products, "sku__in": skus}
wishlist = ProductVariation.objects.filter(**f).select_related(depth=1)
wishlist = sorted(wishlist, key=lambda v: skus.index(v.sku))
context = {"wishlist_items": wishlist, "error": error}
response = render(request, template, context)
if len(wishlist) < len(skus):
skus = [variation.sku for variation in wishlist]
set_cookie(response, "wishlist", ",".join(skus))
return response
@never_cache
def cart(request, template="shop/cart.html"):
"""
Display cart and handle removing items from the cart.
"""
cart_formset = CartItemFormSet(instance=request.cart)
discount_form = DiscountForm(request, request.POST or None)
if request.method == "POST":
valid = True
if request.POST.get("update_cart"):
valid = request.cart.has_items()
if not valid:
# Session timed out.
info(request, _("Your cart has expired"))
else:
cart_formset = CartItemFormSet(request.POST,
instance=request.cart)
valid = cart_formset.is_valid()
if valid:
cart_formset.save()
recalculate_discount(request)
info(request, _("Cart updated"))
else:
valid = discount_form.is_valid()
if valid:
discount_form.set_discount()
if valid:
return redirect("shop_cart")
context = {"cart_formset": cart_formset}
settings.use_editable()
if (settings.SHOP_DISCOUNT_FIELD_IN_CART and
DiscountCode.objects.active().count() > 0):
context["discount_form"] = discount_form
return render(request, template, context)
@never_cache
def checkout_steps(request):
"""
Display the order form and handle processing of each step.
"""
# Do the authentication check here rather than using standard
# login_required decorator. This means we can check for a custom
# LOGIN_URL and fall back to our own login view.
authenticated = request.user.is_authenticated()
if settings.SHOP_CHECKOUT_ACCOUNT_REQUIRED and not authenticated:
url = "%s?next=%s" % (settings.LOGIN_URL, reverse("shop_checkout"))
return redirect(url)
# Determine the Form class to use during the checkout process
form_class = get_callable(settings.SHOP_CHECKOUT_FORM_CLASS)
step = int(request.POST.get("step", checkout.CHECKOUT_STEP_FIRST))
initial = checkout.initial_order_data(request)
form = form_class(request, step, initial=initial)
data = request.POST
checkout_errors = []
if request.POST.get("back") is not None:
# Back button in the form was pressed - load the order form
# for the previous step and maintain the field values entered.
step -= 1
form = form_class(request, step, initial=initial)
elif request.method == "POST" and request.cart.has_items():
form = form_class(request, step, initial=initial, data=data)
if form.is_valid():
# Copy the current form fields to the session so that
# they're maintained if the customer leaves the checkout
# process, but remove sensitive fields from the session
# such as the credit card fields so that they're never
# stored anywhere.
request.session["order"] = dict(form.cleaned_data)
sensitive_card_fields = ("card_number", "card_expiry_month",
"card_expiry_year", "card_ccv")
for field in sensitive_card_fields:
if field in request.session["order"]:
del request.session["order"][field]
# FIRST CHECKOUT STEP - handle shipping and discount code.
if step == checkout.CHECKOUT_STEP_FIRST:
try:
billship_handler(request, form)
except checkout.CheckoutError, e:
checkout_errors.append(e)
form.set_discount()
# FINAL CHECKOUT STEP - handle payment and process order.
if step == checkout.CHECKOUT_STEP_LAST and not checkout_errors:
# Create and save the initial order object so that
# the payment handler has access to all of the order
# fields. If there is a payment error then delete the
# order, otherwise remove the cart items from stock
# and send the order receipt email.
order = form.save(commit=False)
order.setup(request)
# Try payment.
try:
transaction_id = payment_handler(request, form, order)
except checkout.CheckoutError, e:
# Error in payment handler.
order.delete()
checkout_errors.append(e)
if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION:
step -= 1
else:
# Finalize order - ``order.complete()`` performs
# final cleanup of session and cart.
# ``order_handler()`` can be defined by the
# developer to implement custom order processing.
# Then send the order email to the customer.
order.transaction_id = transaction_id
order.complete(request)
order_handler(request, form, order)
checkout.send_order_email(request, order)
# Set the cookie for remembering address details
# if the "remember" checkbox was checked.
response = redirect("shop_complete")
if form.cleaned_data.get("remember") is not None:
remembered = "%s:%s" % (sign(order.key), order.key)
set_cookie(response, "remember", remembered,
secure=request.is_secure())
else:
response.delete_cookie("remember")
return response
# If any checkout errors, assign them to a new form and
# re-run is_valid. If valid, then set form to the next step.
form = form_class(request, step, initial=initial, data=data,
errors=checkout_errors)
if form.is_valid():
step += 1
form = form_class(request, step, initial=initial)
step_vars = checkout.CHECKOUT_STEPS[step - 1]
template = "shop/%s.html" % step_vars["template"]
CHECKOUT_STEP_FIRST = step == checkout.CHECKOUT_STEP_FIRST
context = {"form": form, "CHECKOUT_STEP_FIRST": CHECKOUT_STEP_FIRST,
"step_title": step_vars["title"], "step_url": step_vars["url"],
"steps": checkout.CHECKOUT_STEPS, "step": step}
return render(request, template, context)
@never_cache
def complete(request, template="shop/complete.html"):
"""
Redirected to once an order is complete - pass the order object
for tracking items via Google Analytics, and displaying in
the template if required.
"""
try:
order = Order.objects.from_request(request)
except Order.DoesNotExist:
raise Http404
items = order.items.all()
# Assign product names to each of the items since they're not
# stored.
skus = [item.sku for item in items]
variations = ProductVariation.objects.filter(sku__in=skus)
names = {}
for variation in variations.select_related(depth=1):
names[variation.sku] = variation.product.title
for i, item in enumerate(items):
setattr(items[i], "name", names[item.sku])
context = {"order": order, "items": items,
"steps": checkout.CHECKOUT_STEPS}
return render(request, template, context)
def invoice(request, order_id, template="shop/order_invoice.html"):
"""
Display a plain text invoice for the given order. The order must
belong to the current user (matched via the session key for anonymous
users, or the user ID for authenticated ones), unless the current
user is staff.
"""
lookup = {"id": order_id}
if not request.user.is_authenticated():
lookup["key"] = request.session.session_key
elif not request.user.is_staff:
lookup["user_id"] = request.user.id
order = get_object_or_404(Order, **lookup)
context = {"order": order}
context.update(order.details_as_dict())
context = RequestContext(request, context)
if request.GET.get("format") == "pdf":
response = HttpResponse(mimetype="application/pdf")
name = slugify("%s-invoice-%s" % (settings.SITE_TITLE, order.id))
response["Content-Disposition"] = "attachment; filename=%s.pdf" % name
html = get_template(template).render(context)
import ho.pisa
ho.pisa.CreatePDF(html, response)
return response
return render(request, template, context)
@login_required
def order_history(request, template="shop/order_history.html"):
"""
Display a list of the currently logged-in user's past orders.
"""
all_orders = Order.objects.filter(user_id=request.user.id)
orders = paginate(all_orders.order_by('-time'),
request.GET.get("page", 1),
settings.SHOP_PER_PAGE_CATEGORY,
settings.MAX_PAGING_LINKS)
# Add the total quantity to each order - this can probably be
# replaced with prefetch_related and Sum when we drop Django 1.3
order_quantities = defaultdict(int)
for item in OrderItem.objects.filter(order__user_id=request.user.id):
order_quantities[item.order_id] += item.quantity
for order in orders.object_list:
setattr(order, "quantity_total", order_quantities[order.id])
context = {"orders": orders}
return render(request, template, context)
|
|
#
# Settings.py -- Simple class to manage stateful user preferences.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os
import pprint
import ast
import numpy
from . import Callback
from . import Bunch
unset_value = ("^^UNSET^^")
class SettingError(Exception):
pass
class Setting(Callback.Callbacks):
def __init__(self, value=unset_value, name=None, logger=None,
check_fn=None):
Callback.Callbacks.__init__(self)
self.value = value
self._unset = (value == unset_value)
self.name = name
self.logger = logger
if check_fn is None:
check_fn = self._check_none
self.check_fn = check_fn
# For callbacks
for name in ('set', ):
self.enable_callback(name)
def _check_none(self, value):
return value
def set(self, value, callback=True):
self.value = self.check_fn(value)
if callback:
self.make_callback('set', value)
def get(self, *args):
if self._unset:
if len(args) == 0:
raise KeyError("setting['%s'] value is not set!" % (
self.name))
else:
assert len(args) == 1, \
SettingError("Illegal parameter use to get(): %s" % (
str(args)))
return args[0]
return self.value
def __repr__(self):
return repr(self.value)
def __str__(self):
return str(self.value)
class SettingGroup(object):
def __init__(self, name=None, logger=None, preffile=None):
self.name = name
self.logger = logger
self.preffile = preffile
self.group = Bunch.Bunch()
def addSettings(self, **kwdargs):
for key, value in kwdargs.items():
self.group[key] = Setting(value=value, name=key,
logger=self.logger)
# TODO: add group change callback?
def getSetting(self, key):
return self.group[key]
def shareSettings(self, other, keylist=None):
if keylist is None:
keylist = self.group.keys()
for key in keylist:
other.group[key] = self.group[key]
def copySettings(self, other, keylist=None):
if keylist is None:
keylist = self.group.keys()
d = {}
for key in keylist:
d[key] = self.get(key)
other.setDict(d)
def setdefault(self, key, value):
if key in self.group:
return self.group[key].get(value)
else:
d = { key: value }
self.addSettings(**d)
return self.group[key].get(value)
def addDefaults(self, **kwdargs):
for key, value in kwdargs.items():
self.setdefault(key, value)
def setDefaults(self, **kwdargs):
return self.addDefaults(**kwdargs)
def get(self, *args):
key = args[0]
if len(args) == 1:
return self.group[key].get()
if len(args) == 2:
return self.setdefault(key, args[1])
def getDict(self):
return dict([[name, self.group[name].value] for name in self.group.keys()])
def setDict(self, d, callback=True):
for key, value in d.items():
if key not in self.group:
self.setdefault(key, value)
else:
self.group[key].set(value, callback=callback)
def set(self, callback=True, **kwdargs):
self.setDict(kwdargs, callback=callback)
def __getitem__(self, key):
return self.group[key].value
def __setitem__(self, key, value):
self.group[key].set(value)
def has_key(self, key):
return key in self.group
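# A preference file is a flat series of "key = value" lines whose values
# must parse with ast.literal_eval, e.g. (illustrative):
#   zoom = 1.0
#   colormap = 'gray'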
def load(self, onError='raise'):
try:
d = {}
with open(self.preffile, 'r') as in_f:
buf = in_f.read()
for line in buf.split('\n'):
line = line.strip()
# skip comments and anything that doesn't look like an
# assignment
if line.startswith('#') or (not ('=' in line)):
continue
else:
try:
i = line.index('=')
key = line[:i].strip()
val = ast.literal_eval(line[i+1:].strip())
d[key] = val
except Exception as e:
# silently skip parse errors, for now
continue
self.setDict(d)
except Exception as e:
errmsg = "Error opening settings file (%s): %s" % (
self.preffile, str(e))
if onError == 'silent':
pass
elif onError == 'warn':
self.logger.warning(errmsg)
else:
raise SettingError(errmsg)
def _check(self, d):
if isinstance(d, dict):
for key, value in d.items():
d[key] = self._check(value)
return d
try:
if numpy.isnan(d):
return 0.0
elif numpy.isinf(d):
return 0.0
except Exception:
pass
return d
def save(self):
d = self.getDict()
# sanitize data -- hard to parse NaN or Inf
self._check(d)
try:
# sort keys for easy reading/editing
keys = list(d.keys())
keys.sort()
with open(self.preffile, 'w') as out_f:
for key in keys:
out_f.write("%s = %s\n" % (key, repr(d[key])))
except Exception as e:
errmsg = "Error opening settings file (%s): %s" % (
self.preffile, str(e))
self.logger.error(errmsg)
class Preferences(object):
def __init__(self, basefolder=None, logger=None):
self.folder = basefolder
self.logger = logger
self.settings = Bunch.Bunch(caseless=True)
def setDefaults(self, category, **kwdargs):
self.settings[category].addDefaults(**kwdargs)
def getSettings(self, category):
return self.settings[category]
def remove_settings(self, category):
del self.settings[category]
def get_dict_category(self, category):
return self.settings[category].getDict()
def createCategory(self, category):
if category not in self.settings:
suffix = '.cfg'
path = os.path.join(self.folder, category + suffix)
self.settings[category] = SettingGroup(logger=self.logger,
name=category,
preffile=path)
return self.settings[category]
def get_baseFolder(self):
return self.folder
def getDict(self):
return dict([[name, self.settings[name].getDict()] for name in
self.settings.keys()])
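# Example usage (illustrative):
#   prefs = Preferences(basefolder='/tmp/prefs', logger=logger)
#   settings = prefs.createCategory('viewer')
#   settings.load(onError='silent')
#   settings.setDefaults(zoom=1.0, colormap='gray')
#   settings['zoom'] = 2.0
#   settings.save()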
#END
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for user."""
from core.platform import models
import feconf
(user_models,) = models.Registry.import_models([models.NAMES.user])
class UserGlobalPrefs(object):
"""Domain object for user global email preferences.
Attributes:
can_receive_email_updates: bool. Whether the user can receive
email updates.
can_receive_editor_role_email: bool. Whether the user can receive
emails notifying them of role changes.
can_receive_feedback_message_email: bool. Whether the user can
receive emails when users submit feedback to their explorations.
can_receive_subscription_email: bool. Whether the user can receive
subscription emails notifying them about new explorations.
"""
def __init__(
self, can_receive_email_updates, can_receive_editor_role_email,
can_receive_feedback_message_email, can_receive_subscription_email):
"""Constructs a UserGlobalPrefs domain object.
Args:
can_receive_email_updates: bool. Whether the user can receive
email updates.
can_receive_editor_role_email: bool. Whether the user can receive
emails notifying them of role changes.
can_receive_feedback_message_email: bool. Whether the user can
receive emails when users submit feedback to their explorations.
can_receive_subscription_email: bool. Whether the user can receive
subscription emails notifying them about new explorations.
"""
self.can_receive_email_updates = can_receive_email_updates
self.can_receive_editor_role_email = can_receive_editor_role_email
self.can_receive_feedback_message_email = ( #pylint: disable=invalid-name
can_receive_feedback_message_email)
self.can_receive_subscription_email = can_receive_subscription_email
@classmethod
def create_default_prefs(cls):
"""Returns UserGlobalPrefs with default attributes."""
return cls(
feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
class UserExplorationPrefs(object):
"""Domain object for user exploration email preferences.
Attributes:
mute_feedback_notifications: bool. Whether the given user has muted
feedback emails.
mute_suggestion_notifications: bool. Whether the given user has
muted suggestion emails.
"""
def __init__(self, mute_feedback_notifications,
mute_suggestion_notifications):
"""Constructs a UserExplorationPrefs domain object.
Args:
mute_feedback_notifications: bool. Whether the given user has muted
feedback emails.
mute_suggestion_notifications: bool. Whether the given user has
muted suggestion emails.
"""
self.mute_feedback_notifications = mute_feedback_notifications
self.mute_suggestion_notifications = mute_suggestion_notifications
@classmethod
def create_default_prefs(cls):
"""Returns UserExplorationPrefs with default attributes."""
return cls(
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE,
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
def to_dict(self):
"""Return dictionary representation of UserExplorationPrefs.
Returns:
dict. The keys of the dict are:
'mute_feedback_notifications': bool. Whether the given user has
muted feedback emails.
'mute_suggestion_notifications': bool. Whether the given user
has muted suggestion emails.
"""
return {
'mute_feedback_notifications': self.mute_feedback_notifications,
'mute_suggestion_notifications': self.mute_suggestion_notifications
}
class ExpUserLastPlaythrough(object):
"""Domain object for an exploration last playthrough model."""
def __init__(self, user_id, exploration_id, last_played_exp_version,
last_updated, last_played_state_name):
self.id = '%s.%s' % (user_id, exploration_id)
self.user_id = user_id
self.exploration_id = exploration_id
self.last_played_exp_version = last_played_exp_version
self.last_updated = last_updated
self.last_played_state_name = last_played_state_name
def update_last_played_information(self, last_played_exp_version,
last_played_state_name):
"""Updates the last playthrough information of the user.
Args:
last_played_exp_version: int. The version of the exploration that
was played by the user.
last_played_state_name: str. The name of the state at which the
learner left the exploration.
"""
self.last_played_exp_version = last_played_exp_version
self.last_played_state_name = last_played_state_name
class IncompleteActivities(object):
"""Domain object for the incomplete activities model."""
def __init__(self, user_id, exploration_ids,
collection_ids):
self.id = user_id
self.exploration_ids = exploration_ids
self.collection_ids = collection_ids
def add_exploration_id(self, exploration_id):
"""Adds the exploration id to the list of incomplete exploration ids."""
self.exploration_ids.append(exploration_id)
def remove_exploration_id(self, exploration_id):
"""Removes the exploration id from the list of incomplete exploration
ids.
"""
self.exploration_ids.remove(exploration_id)
def add_collection_id(self, collection_id):
"""Adds the collection id to the list of incomplete collection ids."""
self.collection_ids.append(collection_id)
def remove_collection_id(self, collection_id):
"""Removes the collection id from the list of incomplete collection
ids.
"""
self.collection_ids.remove(collection_id)
class CompletedActivities(object):
"""Domain object for the activities completed by learner model."""
def __init__(self, user_id, exploration_ids,
collection_ids):
self.id = user_id
self.exploration_ids = exploration_ids
self.collection_ids = collection_ids
def add_exploration_id(self, exploration_id):
"""Adds the exploration id to the list of completed exploration ids."""
self.exploration_ids.append(exploration_id)
def remove_exploration_id(self, exploration_id):
"""Removes the exploration id from the list of completed exploration
ids.
"""
self.exploration_ids.remove(exploration_id)
def add_collection_id(self, collection_id):
"""Adds the collection id to the list of completed collection ids."""
self.collection_ids.append(collection_id)
def remove_collection_id(self, collection_id):
"""Removes the collection id from the list of completed collection
ids.
"""
self.collection_ids.remove(collection_id)
class LearnerPlaylist(object):
"""Domain object for the learner playlist model,"""
def __init__(self, user_id, exploration_ids,
collection_ids):
self.id = user_id
self.exploration_ids = exploration_ids
self.collection_ids = collection_ids
def insert_exploration_id_at_given_position(
self, exploration_id, position_to_be_inserted):
"""Inserts the given exploration id at the given position.
Args:
exploration_id: str. The exploration id to be inserted into the
play later list.
position_to_be_inserted: int. The position at which it is to be inserted.
"""
print "ello", position_to_be_inserted
self.exploration_ids.insert(position_to_be_inserted, exploration_id)
def add_exploration_id_to_list(self, exploration_id):
"""Inserts the exploration id at the end of the list.
Args:
exploration_id: str. The exploration id to be appended to the end
of the list.
"""
self.exploration_ids.append(exploration_id)
def insert_collection_id_at_given_position(
self, collection_id, position_to_be_inserted):
"""Inserts the given collection id at the given position.
Args:
collection_id: str. The collection id to be inserted into the
play later list.
position_to_be_inserted: int. The position at which it is to be inserted.
"""
self.collection_ids.insert(position_to_be_inserted, collection_id)
def add_collection_id_to_list(self, collection_id):
"""Inserts the collection id at the end of the list.
Args:
collection_id: str. The collection id to be appended to the end
of the list.
"""
self.collection_ids.append(collection_id)
def remove_exploration_id(self, exploration_id):
"""Removes the exploration id from the learner playlist.
exploration_id: str. The id of the exploration to be removed.
"""
self.exploration_ids.remove(exploration_id)
def remove_collection_id(self, collection_id):
"""Removes the collection id from the learner playlist.
collection_id: str. The id of the collection to be removed.
"""
self.collection_ids.remove(collection_id)
|
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import base64
import hashlib
from ccxt.base.errors import ExchangeError
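# Example usage (illustrative):
#   exchange = bithumb({'apiKey': 'KEY', 'secret': 'SECRET'})
#   ticker = await exchange.fetch_ticker('BTC/KRW')
#   order = await exchange.create_order('BTC/KRW', 'limit', 'buy', 0.01, 5000000)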
class bithumb (Exchange):
def describe(self):
return self.deep_extend(super(bithumb, self).describe(), {
'id': 'bithumb',
'name': 'Bithumb',
'countries': 'KR', # South Korea
'rateLimit': 500,
'hasCORS': True,
'hasFetchTickers': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/30597177-ea800172-9d5e-11e7-804c-b9d4fa9b56b0.jpg',
'api': {
'public': 'https://api.bithumb.com/public',
'private': 'https://api.bithumb.com',
},
'www': 'https://www.bithumb.com',
'doc': 'https://www.bithumb.com/u1/US127',
},
'api': {
'public': {
'get': [
'ticker/{currency}',
'ticker/all',
'orderbook/{currency}',
'orderbook/all',
'recent_transactions/{currency}',
'recent_transactions/all',
],
},
'private': {
'post': [
'info/account',
'info/balance',
'info/wallet_address',
'info/ticker',
'info/orders',
'info/user_transactions',
'trade/place',
'info/order_detail',
'trade/cancel',
'trade/btc_withdrawal',
'trade/krw_deposit',
'trade/krw_withdrawal',
'trade/market_buy',
'trade/market_sell',
],
},
},
'fees': {
'trading': {
'maker': 0.15 / 100,
'taker': 0.15 / 100,
},
},
})
async def fetch_markets(self):
markets = await self.publicGetTickerAll()
currencies = list(markets['data'].keys())
result = []
for i in range(0, len(currencies)):
id = currencies[i]
if id != 'date':
market = markets['data'][id]
base = id
quote = 'KRW'
symbol = id + '/' + quote
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'lot': None,
'active': True,
'precision': {
'amount': None,
'price': None,
},
'limits': {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
}))
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostInfoBalance(self.extend({
'currency': 'ALL',
}, params))
result = {'info': response}
balances = response['data']
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
lowercase = currency.lower()
account['total'] = self.safe_float(balances, 'total_' + lowercase)
account['used'] = self.safe_float(balances, 'in_use_' + lowercase)
account['free'] = self.safe_float(balances, 'available_' + lowercase)
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetOrderbookCurrency(self.extend({
'count': 50, # max = 50
'currency': market['base'],
}, params))
orderbook = response['data']
timestamp = int(orderbook['timestamp'])
return self.parse_order_book(orderbook, timestamp, 'bids', 'asks', 'price', 'quantity')
def parse_ticker(self, ticker, market=None):
timestamp = int(ticker['date'])
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'max_price'),
'low': self.safe_float(ticker, 'min_price'),
'bid': self.safe_float(ticker, 'buy_price'),
'ask': self.safe_float(ticker, 'sell_price'),
'vwap': None,
'open': self.safe_float(ticker, 'opening_price'),
'close': self.safe_float(ticker, 'closing_price'),
'first': None,
'last': self.safe_float(ticker, 'last_trade'),
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'average_price'),
'baseVolume': self.safe_float(ticker, 'volume_1day'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickerAll(params)
result = {}
timestamp = response['data']['date']
tickers = self.omit(response['data'], 'date')
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
ticker['date'] = timestamp
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTickerCurrency(self.extend({
'currency': market['base'],
}, params))
return self.parse_ticker(response['data'], market)
def parse_trade(self, trade, market):
        # a workaround for their bug in the date format: hours are not 0-padded (e.g. '9:05:32' instead of '09:05:32')
transaction_date, transaction_time = trade['transaction_date'].split(' ')
transaction_time_short = len(transaction_time) < 8
if transaction_time_short:
transaction_time = '0' + transaction_time
timestamp = self.parse8601(transaction_date + ' ' + transaction_time)
side = 'sell' if (trade['type'] == 'ask') else 'buy'
return {
'id': None,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': None,
'type': None,
'side': side,
'price': float(trade['price']),
'amount': float(trade['units_traded']),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetRecentTransactionsCurrency(self.extend({
'currency': market['base'],
'count': 100, # max = 100
}, params))
return self.parse_trades(response['data'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = None
method = 'privatePostTrade'
if type == 'limit':
request = {
'order_currency': market['id'],
'Payment_currency': market['quote'],
'units': amount,
'price': price,
'type': 'bid' if (side == 'buy') else 'ask',
}
method += 'Place'
elif type == 'market':
request = {
'currency': market['id'],
'units': amount,
}
method += 'Market' + self.capitalize(side)
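        # Note: the endpoint method name is assembled dynamically; e.g. a market
        # buy dispatches to privatePostTradeMarketBuy, which ccxt generates from
        # the 'trade/market_buy' entry in the api description above.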
response = await getattr(self, method)(self.extend(request, params))
id = None
if 'order_id' in response:
if response['order_id']:
id = str(response['order_id'])
return {
'info': response,
'id': id,
}
    async def cancel_order(self, id, symbol=None, params={}):
        if 'side' not in params:
            raise ExchangeError(self.id + ' cancelOrder requires a side parameter (sell or buy) and a currency parameter')
        if 'currency' not in params:
            raise ExchangeError(self.id + ' cancelOrder requires a currency parameter')
        # map the unified 'buy'/'sell' side onto the exchange-specific value
        side = 'purchase' if (params['side'] == 'buy') else 'sales'
        return await self.privatePostTradeCancel({
            'order_id': id,
            'type': side,
            'currency': params['currency'],
        })
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
endpoint = '/' + self.implode_params(path, params)
url = self.urls['api'][api] + endpoint
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'endpoint': endpoint,
}, query))
nonce = str(self.nonce())
auth = endpoint + "\0" + body + "\0" + nonce
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512)
signature64 = self.decode(base64.b64encode(self.encode(signature)))
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
'Api-Key': self.apiKey,
'Api-Sign': str(signature64),
'Api-Nonce': nonce,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
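    # Signing sketch (illustrative): for the endpoint '/info/balance' the body is
    # the url-encoded form 'endpoint=%2Finfo%2Fbalance', and the Api-Sign header
    # is base64(hex(HMAC-SHA512(secret, endpoint + "\0" + body + "\0" + nonce))),
    # i.e. exactly the construction performed by sign() above.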
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'status' in response:
if response['status'] == '0000':
return response
raise ExchangeError(self.id + ' ' + self.json(response))
return response
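# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the exchange class). Assumes network
# access and that BTC trades against KRW on bithumb; public endpoints need no
# API credentials. close() releases the underlying aiohttp session.
if __name__ == '__main__':
    import asyncio

    async def _demo():
        exchange = bithumb()
        try:
            markets = await exchange.fetch_markets()
            print('symbols:', [m['symbol'] for m in markets][:5])
            ticker = await exchange.fetch_ticker('BTC/KRW')
            print('BTC/KRW last price:', ticker['last'])
        finally:
            await exchange.close()

    asyncio.get_event_loop().run_until_complete(_demo())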
|
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import (
truncated_range, truncated_discrete_set,
strict_discrete_set
)
from pymeasure.adapters import VISAAdapter
from .buffer import KeithleyBuffer
class Keithley2000(Instrument, KeithleyBuffer):
""" Represents the Keithley 2000 Multimeter and provides a high-level
interface for interacting with the instrument.
.. code-block:: python
meter = Keithley2000("GPIB::1")
meter.measure_voltage()
print(meter.voltage)
"""
MODES = {
'current':'CURR:DC', 'current ac':'CURR:AC',
'voltage':'VOLT:DC', 'voltage ac':'VOLT:AC',
'resistance':'RES', 'resistance 4W':'FRES',
'period':'PER', 'frequency':'FREQ',
'temperature':'TEMP', 'diode':'DIOD',
'continuity':'CONT'
}
mode = Instrument.control(
":CONF?", ":CONF:%s",
""" A string property that controls the configuration mode for measurements,
which can take the values: :code:'current' (DC), :code:'current ac',
:code:'voltage' (DC), :code:'voltage ac', :code:'resistance' (2-wire),
:code:'resistance 4W' (4-wire), :code:'period', :code:'frequency',
:code:'temperature', :code:'diode', and :code:'frequency'. """,
validator=strict_discrete_set,
values=MODES,
map_values=True,
get_process=lambda v: v.replace('"', '')
)
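    # Reading `mode` returns the raw configuration string (e.g. 'VOLT:DC') with
    # the quotes stripped by get_process, and map_values translates it back to a
    # key of MODES; writing `mode = 'voltage'` sends ':CONF:VOLT:DC'.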
beep_state = Instrument.control(
":SYST:BEEP:STAT?",
":SYST:BEEP:STAT %g",
""" A string property that enables or disables the system status beeper,
which can take the values: :code:'enabled' and :code:'disabled'. """,
validator=strict_discrete_set,
values={'enabled':1, 'disabled':0},
map_values=True
)
###############
# Current (A) #
###############
current = Instrument.measurement(":READ?",
""" Reads a DC or AC current measurement in Amps, based on the
active :attr:`~.Keithley2000.mode`. """
)
current_range = Instrument.control(
":SENS:CURR:RANG?", ":SENS:CURR:RANG:AUTO 0;:SENS:CURR:RANG %g",
""" A floating point property that controls the DC current range in
Amps, which can take values from 0 to 3.1 A.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[0, 3.1]
)
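    # Note: the set command chains two SCPI messages with ';' so that assigning
    # a manual range first disables auto-ranging. The same pattern is used by
    # the other *_range properties below.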
current_reference = Instrument.control(
":SENS:CURR:REF?", ":SENS:CURR:REF %g",
""" A floating point property that controls the DC current reference
value in Amps, which can take values from -3.1 to 3.1 A. """,
validator=truncated_range,
values=[-3.1, 3.1]
)
current_nplc = Instrument.control(
":SENS:CURR:NPLC?", ":SENS:CURR:NPLC %g",
""" A floating point property that controls the number of power line cycles
(NPLC) for the DC current measurements, which sets the integration period
and measurement speed. Takes values from 0.01 to 10, where 0.1, 1, and 10 are
Fast, Medium, and Slow respectively. """
)
current_digits = Instrument.control(
":SENS:CURR:DIG?", ":SENS:CURR:DIG %d",
""" An integer property that controls the number of digits in the DC current
readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int,
)
current_ac_range = Instrument.control(
":SENS:CURR:AC:RANG?", ":SENS:CURR:AC:RANG:AUTO 0;:SENS:CURR:AC:RANG %g",
""" A floating point property that controls the AC current range in
Amps, which can take values from 0 to 3.1 A.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[0, 3.1]
)
current_ac_reference = Instrument.control(
":SENS:CURR:AC:REF?", ":SENS:CURR:AC:REF %g",
""" A floating point property that controls the AC current reference
value in Amps, which can take values from -3.1 to 3.1 A. """,
validator=truncated_range,
values=[-3.1, 3.1]
)
current_ac_nplc = Instrument.control(
":SENS:CURR:AC:NPLC?", ":SENS:CURR:AC:NPLC %g",
""" A floating point property that controls the number of power line cycles
(NPLC) for the AC current measurements, which sets the integration period
and measurement speed. Takes values from 0.01 to 10, where 0.1, 1, and 10 are
Fast, Medium, and Slow respectively. """
)
current_ac_digits = Instrument.control(
":SENS:CURR:AC:DIG?", ":SENS:CURR:AC:DIG %d",
""" An integer property that controls the number of digits in the AC current
readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
current_ac_bandwidth = Instrument.control(
":SENS:CURR:AC:DET:BAND?", ":SENS:CURR:AC:DET:BAND %g",
""" A floating point property that sets the AC current detector
bandwidth in Hz, which can take the values 3, 30, and 300 Hz. """,
validator=truncated_discrete_set,
values=[3, 30, 300]
)
###############
# Voltage (V) #
###############
voltage = Instrument.measurement(":READ?",
""" Reads a DC or AC voltage measurement in Volts, based on the
active :attr:`~.Keithley2000.mode`. """
)
voltage_range = Instrument.control(
":SENS:VOLT:RANG?", ":SENS:VOLT:RANG:AUTO 0;:SENS:VOLT:RANG %g",
""" A floating point property that controls the DC voltage range in
Volts, which can take values from 0 to 1010 V.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[0, 1010]
)
voltage_reference = Instrument.control(
":SENS:VOLT:REF?", ":SENS:VOLT:REF %g",
""" A floating point property that controls the DC voltage reference
value in Volts, which can take values from -1010 to 1010 V. """,
validator=truncated_range,
values=[-1010, 1010]
)
voltage_nplc = Instrument.control(
":SENS:CURRVOLT:NPLC?", ":SENS:VOLT:NPLC %g",
""" A floating point property that controls the number of power line cycles
(NPLC) for the DC voltage measurements, which sets the integration period
and measurement speed. Takes values from 0.01 to 10, where 0.1, 1, and 10 are
Fast, Medium, and Slow respectively. """
)
voltage_digits = Instrument.control(
":SENS:VOLT:DIG?", ":SENS:VOLT:DIG %d",
""" An integer property that controls the number of digits in the DC voltage
readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
voltage_ac_range = Instrument.control(
":SENS:VOLT:AC:RANG?", ":SENS:VOLT:RANG:AUTO 0;:SENS:VOLT:AC:RANG %g",
""" A floating point property that controls the AC voltage range in
Volts, which can take values from 0 to 757.5 V.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[0, 757.5]
)
voltage_ac_reference = Instrument.control(
":SENS:VOLT:AC:REF?", ":SENS:VOLT:AC:REF %g",
""" A floating point property that controls the AC voltage reference
value in Volts, which can take values from -757.5 to 757.5 Volts. """,
validator=truncated_range,
values=[-757.5, 757.5]
)
voltage_ac_nplc = Instrument.control(
":SENS:VOLT:AC:NPLC?", ":SENS:VOLT:AC:NPLC %g",
""" A floating point property that controls the number of power line cycles
(NPLC) for the AC voltage measurements, which sets the integration period
and measurement speed. Takes values from 0.01 to 10, where 0.1, 1, and 10 are
Fast, Medium, and Slow respectively. """
)
voltage_ac_digits = Instrument.control(
":SENS:VOLT:AC:DIG?", ":SENS:VOLT:AC:DIG %d",
""" An integer property that controls the number of digits in the AC voltage
readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
voltage_ac_bandwidth = Instrument.control(
":SENS:VOLT:AC:DET:BAND?", ":SENS:VOLT:AC:DET:BAND %g",
""" A floating point property that sets the AC voltage detector
bandwidth in Hz, which can take the values 3, 30, and 300 Hz. """,
validator=truncated_discrete_set,
values=[3, 30, 300]
)
####################
# Resistance (Ohm) #
####################
resistance = Instrument.measurement(":READ?",
""" Reads a resistance measurement in Ohms for both 2-wire and 4-wire
configurations, based on the active :attr:`~.Keithley2000.mode`. """
)
resistance_range = Instrument.control(
":SENS:RES:RANG?", ":SENS:RES:RANG:AUTO 0;:SENS:RES:RANG %g",
""" A floating point property that controls the 2-wire resistance range
in Ohms, which can take values from 0 to 120 MOhms.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[0, 120e6]
)
resistance_reference = Instrument.control(
":SENS:RES:REF?", ":SENS:RES:REF %g",
""" A floating point property that controls the 2-wire resistance
reference value in Ohms, which can take values from 0 to 120 MOhms. """,
validator=truncated_range,
values=[0, 120e6]
)
resistance_nplc = Instrument.control(
":SENS:RES:NPLC?", ":SENS:RES:NPLC %g",
""" A floating point property that controls the number of power line cycles
(NPLC) for the 2-wire resistance measurements, which sets the integration period
and measurement speed. Takes values from 0.01 to 10, where 0.1, 1, and 10 are
Fast, Medium, and Slow respectively. """
)
resistance_digits = Instrument.control(
":SENS:RES:DIG?", ":SENS:RES:DIG %d",
""" An integer property that controls the number of digits in the 2-wire
resistance readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
resistance_4W_range = Instrument.control(
":SENS:FRES:RANG?", ":SENS:FRES:RANG:AUTO 0;:SENS:FRES:RANG %g",
""" A floating point property that controls the 4-wire resistance range
in Ohms, which can take values from 0 to 120 MOhms.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[0, 120e6]
)
resistance_4W_reference = Instrument.control(
":SENS:FRES:REF?", ":SENS:FRES:REF %g",
""" A floating point property that controls the 4-wire resistance
reference value in Ohms, which can take values from 0 to 120 MOhms. """,
validator=truncated_range,
values=[0, 120e6]
)
resistance_4W_nplc = Instrument.control(
":SENS:FRES:NPLC?", ":SENS:FRES:NPLC %g",
""" A floating point property that controls the number of power line cycles
(NPLC) for the 4-wire resistance measurements, which sets the integration period
and measurement speed. Takes values from 0.01 to 10, where 0.1, 1, and 10 are
Fast, Medium, and Slow respectively. """
)
resistance_4W_digits = Instrument.control(
":SENS:FRES:DIG?", ":SENS:FRES:DIG %d",
""" An integer property that controls the number of digits in the 4-wire
resistance readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
##################
# Frequency (Hz) #
##################
frequency = Instrument.measurement(":READ?",
""" Reads a frequency measurement in Hz, based on the
active :attr:`~.Keithley2000.mode`. """
)
frequency_reference = Instrument.control(
":SENS:FREQ:REF?", ":SENS:FREQ:REF %g",
""" A floating point property that controls the frequency reference
value in Hz, which can take values from 0 to 15 MHz. """,
validator=truncated_range,
values=[0, 15e6]
)
frequency_digits = Instrument.control(
":SENS:FREQ:DIG?", ":SENS:FREQ:DIG %d",
""" An integer property that controls the number of digits in the frequency
readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
frequency_threshold = Instrument.control(
":SENS:FREQ:THR:VOLT:RANG?", ":SENS:FREQ:THR:VOLT:RANG %g",
""" A floating point property that controls the voltage signal threshold
level in Volts for the frequency measurement, which can take values
from 0 to 1010 V. """,
validator=truncated_range,
values=[0, 1010]
)
frequency_aperature = Instrument.control(
":SENS:FREQ:APER?", ":SENS:FREQ:APER %g",
""" A floating point property that controls the frequency aperature in seconds,
which sets the integration period and measurement speed. Takes values
from 0.01 to 1.0 s. """,
validator=truncated_range,
values=[0.01, 1.0]
)
##############
# Period (s) #
##############
period = Instrument.measurement(":READ?",
""" Reads a period measurement in seconds, based on the
active :attr:`~.Keithley2000.mode`. """
)
period_reference = Instrument.control(
":SENS:PER:REF?", ":SENS:PER:REF %g",
""" A floating point property that controls the period reference value
in seconds, which can take values from 0 to 1 s. """,
validator=truncated_range,
values=[0, 1]
)
period_digits = Instrument.control(
":SENS:PER:DIG?", ":SENS:PER:DIG %d",
""" An integer property that controls the number of digits in the period
readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
period_threshold = Instrument.control(
":SENS:PER:THR:VOLT:RANG?", ":SENS:PRE:THR:VOLT:RANG %g",
""" A floating point property that controls the voltage signal threshold
level in Volts for the period measurement, which can take values
from 0 to 1010 V. """,
validator=truncated_range,
values=[0, 1010]
)
period_aperature = Instrument.control(
":SENS:PER:APER?", ":SENS:PER:APER %g",
""" A floating point property that controls the period aperature in seconds,
which sets the integration period and measurement speed. Takes values
from 0.01 to 1.0 s. """,
validator=truncated_range,
values=[0.01, 1.0]
)
###################
# Temperature (C) #
###################
temperature = Instrument.measurement(":READ?",
""" Reads a temperature measurement in Celsius, based on the
active :attr:`~.Keithley2000.mode`. """
)
temperature_reference = Instrument.control(
":SENS:TEMP:REF?", ":SENS:TEMP:REF %g",
""" A floating point property that controls the temperature reference value
in Celsius, which can take values from -200 to 1372 C. """,
validator=truncated_range,
values=[-200, 1372]
)
temperature_nplc = Instrument.control(
":SENS:TEMP:NPLC?", ":SENS:TEMP:NPLC %g",
""" A floating point property that controls the number of power line cycles
(NPLC) for the temperature measurements, which sets the integration period
and measurement speed. Takes values from 0.01 to 10, where 0.1, 1, and 10 are
Fast, Medium, and Slow respectively. """
)
temperature_digits = Instrument.control(
":SENS:TEMP:DIG?", ":SENS:TEMP:DIG %d",
""" An integer property that controls the number of digits in the temperature
readings, which can take values from 4 to 7. """,
validator=truncated_discrete_set,
values=[4, 5, 6, 7],
cast=int
)
###########
# Trigger #
###########
trigger_count = Instrument.control(
":TRIG:COUN?", ":TRIG:COUN %d",
""" An integer property that controls the trigger count,
which can take values from 1 to 9,999. """,
validator=truncated_range,
values=[1, 9999],
cast=int
)
trigger_delay = Instrument.control(
":TRIG:SEQ:DEL?", ":TRIG:SEQ:DEL %g",
""" A floating point property that controls the trigger delay
in seconds, which can take values from 1 to 9,999,999.999 s. """,
validator=truncated_range,
values=[0, 999999.999]
)
def __init__(self, adapter, **kwargs):
super(Keithley2000, self).__init__(
adapter, "Keithley 2000 Multimeter", **kwargs
)
# Set up data transfer format
if isinstance(self.adapter, VISAAdapter):
self.adapter.config(
is_binary=False,
datatype='float32',
converter='f',
separator=','
)
def measure_voltage(self, max_voltage=1, ac=False):
""" Configures the instrument to measure voltage,
based on a maximum voltage to set the range, and
a boolean flag to determine if DC or AC is required.
:param max_voltage: A voltage in Volts to set the voltage range
:param ac: False for DC voltage, and True for AC voltage
"""
if ac:
self.mode = 'voltage ac'
self.voltage_ac_range = max_voltage
else:
self.mode = 'voltage'
self.voltage_range = max_voltage
def measure_current(self, max_current=10e-3, ac=False):
""" Configures the instrument to measure current,
based on a maximum current to set the range, and
a boolean flag to determine if DC or AC is required.
        :param max_current: A current in Amps to set the current range
:param ac: False for DC current, and True for AC current
"""
if ac:
self.mode = 'current ac'
self.current_ac_range = max_current
else:
self.mode = 'current'
self.current_range = max_current
def measure_resistance(self, max_resistance=10e6, wires=2):
""" Configures the instrument to measure voltage,
based on a maximum voltage to set the range, and
a boolean flag to determine if DC or AC is required.
:param max_voltage: A voltage in Volts to set the voltage range
:param ac: False for DC voltage, and True for AC voltage
"""
if wires == 2:
self.mode = 'resistance'
self.resistance_range = max_resistance
elif wires == 4:
self.mode = 'resistance 4W'
self.resistance_4W_range = max_resistance
else:
            raise ValueError("Keithley 2000 only supports 2 or 4 wire "
                             "resistance measurements.")
def measure_period(self):
""" Configures the instrument to measure the period. """
self.mode = 'period'
def measure_frequency(self):
""" Configures the instrument to measure the frequency. """
self.mode = 'frequency'
def measure_temperature(self):
""" Configures the instrument to measure the temperature. """
self.mode = 'temperature'
def measure_diode(self):
""" Configures the instrument to perform diode testing. """
self.mode = 'diode'
def measure_continuity(self):
""" Configures the instrument to perform continuity testing. """
self.mode = 'continuity'
def _mode_command(self, mode=None):
if mode is None:
mode = self.mode
return self.MODES[mode]
def auto_range(self, mode=None):
""" Sets the active mode to use auto-range,
or can set another mode by its name.
:param mode: A valid :attr:`~.Keithley2000.mode` name, or None for the active mode
"""
self.write(":SENS:%s:RANG:AUTO 1" % self._mode_command(mode))
def enable_reference(self, mode=None):
""" Enables the reference for the active mode,
or can set another mode by its name.
:param mode: A valid :attr:`~.Keithley2000.mode` name, or None for the active mode
"""
self.write(":SENS:%s:REF:STAT 1" % self._mode_command(mode))
def disable_reference(self, mode=None):
""" Disables the reference for the active mode,
or can set another mode by its name.
:param mode: A valid :attr:`~.Keithley2000.mode` name, or None for the active mode
"""
self.write(":SENS:%s:REF:STAT 0" % self._mode_command(mode))
def acquire_reference(self, mode=None):
""" Sets the active value as the reference for the active mode,
or can set another mode by its name.
:param mode: A valid :attr:`~.Keithley2000.mode` name, or None for the active mode
"""
self.write(":SENS:%s:REF:ACQ" % self._mode_command(mode))
def enable_filter(self, mode=None, type='repeat', count=1):
""" Enables the averaging filter for the active mode,
or can set another mode by its name.
:param mode: A valid :attr:`~.Keithley2000.mode` name, or None for the active mode
:param type: The type of averaging filter, either 'repeat' or 'moving'.
:param count: A number of averages, which can take take values from 1 to 100
"""
self.write(":SENS:%s:AVER:STAT 1")
self.write(":SENS:%s:AVER:TCON %s")
self.write(":SENS:%s:AVER:COUN %d")
def disable_filter(self, mode=None):
""" Disables the averaging filter for the active mode,
or can set another mode by its name.
:param mode: A valid :attr:`~.Keithley2000.mode` name, or None for the active mode
"""
self.write(":SENS:%s:AVER:STAT 0" % self._mode_command(mode))
def local(self):
""" Returns control to the instrument panel, and enables
the panel if disabled. """
self.write(":SYST:LOC")
def remote(self):
""" Places the instrument in the remote state, which is
does not need to be explicity called in general. """
self.write(":SYST:REM")
def remote_lock(self):
""" Disables and locks the front panel controls to prevent
changes during remote operations. This is disabled by
calling :meth:`~.Keithley2000.local`. """
self.write(":SYST:RWL")
def reset(self):
""" Resets the instrument state. """
self.write(":STAT:QUEUE:CLEAR;*RST;:STAT:PRES;:*CLS;")
def beep(self, frequency, duration):
""" Sounds a system beep.
:param frequency: A frequency in Hz between 65 Hz and 2 MHz
:param duration: A time in seconds between 0 and 7.9 seconds
"""
self.write(":SYST:BEEP %g, %g" % (frequency, duration))
|
|
from redis import Redis, StrictRedis, ConnectionPool
from leaderboard.leaderboard import Leaderboard
import unittest
import time
import sure
class LeaderboardTest(unittest.TestCase):
def setUp(self):
self.leaderboard = Leaderboard('name', decode_responses=True)
def tearDown(self):
self.leaderboard.redis_connection.flushdb()
Leaderboard.MEMBER_KEY = 'member'
Leaderboard.SCORE_KEY = 'score'
Leaderboard.RANK_KEY = 'rank'
Leaderboard.MEMBER_DATA_KEY = 'member_data'
def test_version(self):
Leaderboard.VERSION.should.equal('3.7.3')
def test_init_with_defaults(self):
'name'.should.equal(self.leaderboard.leaderboard_name)
len(self.leaderboard.options).should.be(2)
self.leaderboard.options['connection_pool'].should.be.a(ConnectionPool)
self.leaderboard.redis_connection.should.be.a(Redis)
self.leaderboard.DEFAULT_PAGE_SIZE.should.equal(
self.leaderboard.page_size)
def test_init_sets_page_size_to_default_if_set_to_invalid_value(self):
self.leaderboard = Leaderboard('name', page_size=0)
self.leaderboard.page_size.should.equal(Leaderboard.DEFAULT_PAGE_SIZE)
def test_init_uses_connection_pooling(self):
lb0 = Leaderboard('lb0', db=0)
lb1 = Leaderboard('lb1', db=0)
lb2 = Leaderboard('lb2', db=1)
lb0.redis_connection.connection_pool.should.equal(
lb1.redis_connection.connection_pool)
lb0.redis_connection.connection_pool.should_not.equal(
lb2.redis_connection.connection_pool)
def test_init_uses_connection(self):
lb = Leaderboard('lb0', connection=Redis(db=1))
lb.redis_connection.connection_pool.connection_kwargs[
'db'].should.equal(1)
lb = Leaderboard('lb1', connection=StrictRedis(db=1))
lb.redis_connection.connection_pool.connection_kwargs[
'db'].should.equal(1)
def test_delete_leaderboard(self):
self.__rank_members_in_leaderboard()
self.leaderboard.redis_connection.exists('name').should.be.true
self.leaderboard.delete_leaderboard()
self.leaderboard.redis_connection.exists('name').should.be.false
def test_member_data_for(self):
self.__rank_members_in_leaderboard()
self.leaderboard.member_data_for('member_1').should.eql(
str({'member_name': 'Leaderboard member 1'}))
def test_members_data_for(self):
self.__rank_members_in_leaderboard()
members_data = self.leaderboard.members_data_for(['member_1', 'member_3'])
members_data[0].should.eql(str({'member_name': 'Leaderboard member 1'}))
members_data[1].should.eql(str({'member_name': 'Leaderboard member 3'}))
def test_update_member_data(self):
self.__rank_members_in_leaderboard()
self.leaderboard.update_member_data(
'member_1', {
'member_name': 'Updated Leaderboard member 1'})
self.leaderboard.member_data_for('member_1').should.eql(
str({'member_name': 'Updated Leaderboard member 1'}))
def test_remove_member_data(self):
self.__rank_members_in_leaderboard()
self.leaderboard.remove_member_data('member_1')
self.leaderboard.member_data_for('member_1').should.be(None)
def test_total_members(self):
self.__rank_members_in_leaderboard()
self.leaderboard.total_members().should.equal(5)
def test_remove_member(self):
self.__rank_members_in_leaderboard()
self.leaderboard.total_members().should.equal(5)
self.leaderboard.remove_member('member_1')
self.leaderboard.total_members().should.equal(4)
def test_remove_member_also_removes_member_data(self):
self.__rank_members_in_leaderboard()
self.leaderboard.redis_connection.exists(
"name:member_data").should.be.true
len(self.leaderboard.redis_connection.hgetall(
"name:member_data")).should.equal(5)
self.leaderboard.total_members().should.equal(5)
self.leaderboard.remove_member('member_1')
self.leaderboard.redis_connection.exists(
"name:member_data").should.be.true
len(self.leaderboard.redis_connection.hgetall(
"name:member_data")).should.equal(4)
self.leaderboard.total_members().should.equal(4)
def test_total_pages(self):
self.__rank_members_in_leaderboard(27)
self.leaderboard.total_members().should.equal(26)
self.leaderboard.total_pages().should.equal(2)
def test_total_members_in_score_range(self):
self.__rank_members_in_leaderboard()
self.leaderboard.total_members_in_score_range(2, 4).should.equal(3)
def test_score_for(self):
self.__rank_members_in_leaderboard()
self.leaderboard.score_for('member_5').should.equal(5.0)
self.leaderboard.score_for('jones').should.be(None)
def test_check_member(self):
self.__rank_members_in_leaderboard()
self.leaderboard.check_member('member_3').should.be.true
self.leaderboard.check_member('member_6').should.be.false
def test_rank_for(self):
self.__rank_members_in_leaderboard()
self.leaderboard.rank_for('member_5').should.equal(1)
def test_change_score_for(self):
self.__rank_members_in_leaderboard()
self.leaderboard.change_score_for('member_1', 99)
self.leaderboard.rank_for('member_1').should.equal(1)
self.leaderboard.score_for('member_1').should.equal(100.0)
def test_change_score_for_and_member_data_for_a_member(self):
self.leaderboard.change_score_for('member_1', 5, 'optional-data')
self.leaderboard.score_for('member_1').should.equal(5.0)
self.leaderboard.member_data_for('member_1').should.equal('optional-data')
def test_score_and_rank_for(self):
self.__rank_members_in_leaderboard()
score_and_rank = self.leaderboard.score_and_rank_for('member_3')
score_and_rank['member'].should.equal('member_3')
score_and_rank['score'].should.equal(3.0)
score_and_rank['rank'].should.equal(3)
score_and_rank = self.leaderboard.score_and_rank_for('jones')
score_and_rank['member'].should.equal('jones')
score_and_rank['score'].should.be(None)
score_and_rank['rank'].should.be(None)
def test_remove_members_in_score_range(self):
self.__rank_members_in_leaderboard()
self.leaderboard.total_members().should.equal(5)
self.leaderboard.remove_members_in_score_range(2, 4)
self.leaderboard.total_members().should.equal(2)
def test_remove_members_outside_rank(self):
self.__rank_members_in_leaderboard()
self.leaderboard.total_members().should.equal(5)
self.leaderboard.remove_members_outside_rank(3).should.equal(2)
leaders = self.leaderboard.leaders(1)
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_5')
leaders[2]['member'].should.equal('member_3')
self.leaderboard.order = Leaderboard.ASC
self.__rank_members_in_leaderboard()
self.leaderboard.total_members().should.equal(5)
self.leaderboard.remove_members_outside_rank(3).should.equal(2)
leaders = self.leaderboard.leaders(1)
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_1')
leaders[2]['member'].should.equal('member_3')
def test_page_for(self):
self.leaderboard.page_for('jones').should.equal(0)
self.__rank_members_in_leaderboard(21)
self.leaderboard.page_for('member_17').should.equal(1)
self.leaderboard.page_for('member_11').should.equal(1)
self.leaderboard.page_for('member_10').should.equal(1)
self.leaderboard.page_for('member_1').should.equal(1)
self.leaderboard.page_for('member_17', 10).should.equal(1)
self.leaderboard.page_for('member_11', 10).should.equal(1)
self.leaderboard.page_for('member_10', 10).should.equal(2)
self.leaderboard.page_for('member_1', 10).should.equal(2)
def test_page_for_with_sort_option_ASC(self):
self.leaderboard.order = Leaderboard.ASC
self.leaderboard.page_for('jones').should.equal(0)
self.__rank_members_in_leaderboard(21)
self.leaderboard.page_for('member_10', 10).should.equal(1)
self.leaderboard.page_for('member_1', 10).should.equal(1)
self.leaderboard.page_for('member_17', 10).should.equal(2)
self.leaderboard.page_for('member_11', 10).should.equal(2)
def test_percentile_for(self):
self.__rank_members_in_leaderboard(13)
self.leaderboard.percentile_for('member_1').should.eql(0.0)
self.leaderboard.percentile_for('member_2').should.eql(9.0)
self.leaderboard.percentile_for('member_3').should.eql(17.0)
self.leaderboard.percentile_for('member_4').should.eql(25.0)
self.leaderboard.percentile_for('member_12').should.eql(92.0)
def test_score_for_percentile(self):
self.__rank_members_in_leaderboard(6)
self.leaderboard.score_for_percentile(0).should.eql(1.0)
self.leaderboard.score_for_percentile(75).should.eql(4.0)
self.leaderboard.score_for_percentile(87.5).should.eql(4.5)
self.leaderboard.score_for_percentile(93.75).should.eql(4.75)
self.leaderboard.score_for_percentile(100).should.eql(5.0)
def test_score_for_percentile_with_sort_option_ASC(self):
self.leaderboard.order = Leaderboard.ASC
self.__rank_members_in_leaderboard(6)
self.leaderboard.score_for_percentile(0).should.eql(5.0)
self.leaderboard.score_for_percentile(75).should.eql(2.0)
self.leaderboard.score_for_percentile(87.5).should.eql(1.5)
self.leaderboard.score_for_percentile(93.75).should.eql(1.25)
self.leaderboard.score_for_percentile(100).should.eql(1.0)
def test_expire_leaderboard(self):
self.__rank_members_in_leaderboard()
self.leaderboard.expire_leaderboard(3)
ttl = self.leaderboard.redis_connection.ttl(
self.leaderboard.leaderboard_name)
ttl.should.be.greater_than(1)
ttl = self.leaderboard.redis_connection.ttl(
'%s:member_data' %
self.leaderboard.leaderboard_name)
ttl.should.be.greater_than(1)
def test_expire_leaderboard_at(self):
self.__rank_members_in_leaderboard()
self.leaderboard.expire_leaderboard_at(int(time.time() + 10))
ttl = self.leaderboard.redis_connection.ttl(
self.leaderboard.leaderboard_name)
ttl.should.be.lower_than(11)
ttl = self.leaderboard.redis_connection.ttl(
'%s:member_data' %
self.leaderboard.leaderboard_name)
ttl.should.be.lower_than(11)
def test_leaders(self):
self.__rank_members_in_leaderboard(27)
leaders = self.leaderboard.leaders(1)
len(leaders).should.equal(25)
leaders[0]['member'].should.equal('member_26')
leaders[0]['rank'].should.equal(1)
leaders[24]['member'].should.equal('member_2')
leaders = self.leaderboard.leaders(2)
len(leaders).should.equal(1)
leaders[0]['member'].should.equal('member_1')
leaders[0]['rank'].should.equal(26)
leaders = self.leaderboard.leaders(1, page_size=5)
len(leaders).should.equal(5)
def test_leaders_with_optional_member_data(self):
self.__rank_members_in_leaderboard()
leaders = self.leaderboard.leaders(1, with_member_data=True)
len(leaders).should.equal(5)
leaders[0]['member'].should.equal('member_5')
leaders[0]['member_data'].should.equal(
str({'member_name': 'Leaderboard member 5'}))
def test_leaders_return_type(self):
leaders = self.leaderboard.leaders(1)
type(leaders).should.equal(type([]))
leaders.should.equal([])
def test_ranked_in_list_with_sort_by(self):
self.__rank_members_in_leaderboard(26)
leaders = self.leaderboard.ranked_in_list(
['member_25', 'member_1', 'member_15'], sort_by='score')
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_1')
leaders[1]['member'].should.equal('member_15')
leaders[2]['member'].should.equal('member_25')
leaders = self.leaderboard.ranked_in_list(
['member_25', 'member_1', 'member_15'], sort_by='rank')
len(leaders).should.be(3)
leaders[0]['member'].should.equal('member_25')
leaders[1]['member'].should.equal('member_15')
leaders[2]['member'].should.equal('member_1')
def test_ranked_in_list(self):
self.__rank_members_in_leaderboard(27)
leaders = self.leaderboard.ranked_in_list(
['member_1', 'member_15', 'member_25'])
len(leaders).should.be(3)
leaders[0]['member'].should.equal('member_1')
leaders[1]['member'].should.equal('member_15')
leaders[2]['member'].should.equal('member_25')
leaders = self.leaderboard.ranked_in_list(
['member_200'], include_missing=False, with_member_data=True)
len(leaders).should.be(0)
def test_ranked_in_list_with_unknown_member(self):
self.__rank_members_in_leaderboard(27)
leaders = self.leaderboard.ranked_in_list(['jones'])
len(leaders).should.be(1)
leaders[0]['member'].should.equal('jones')
leaders[0]['score'].should.be(None)
leaders[0]['rank'].should.be(None)
def test_all_leaders(self):
self.__rank_members_in_leaderboard(26)
leaders = self.leaderboard.all_leaders()
len(leaders).should.be(25)
leaders[0]['member'].should.equal('member_25')
def test_members_from_score_range(self):
self.__rank_members_in_leaderboard(26)
members = self.leaderboard.members_from_score_range(10, 15)
member_15 = {
'member': 'member_15',
'score': 15.0,
'rank': 11
}
members[0].should.eql(member_15)
member_10 = {
'member': 'member_10',
'score': 10.0,
'rank': 16
}
members[5].should.eql(member_10)
def test_members_from_rank_range(self):
self.__rank_members_in_leaderboard(26)
members = self.leaderboard.members_from_rank_range(5, 9)
len(members).should.be(5)
members[0]['member'].should.eql('member_21')
members[0]['score'].should.equal(21.0)
members[4]['member'].should.eql('member_17')
members = self.leaderboard.members_from_rank_range(1, 1)
len(members).should.equal(1)
members[0]['member'].should.eql('member_25')
members = self.leaderboard.members_from_rank_range(1, 26)
len(members).should.equal(25)
members[0]['member'].should.eql('member_25')
members[0]['score'].should.equal(25.0)
members[24]['member'].should.eql('member_1')
def test_member_at(self):
self.__rank_members_in_leaderboard(51)
self.leaderboard.member_at(1)['rank'].should.equal(1)
self.leaderboard.member_at(1)['score'].should.equal(50.0)
self.leaderboard.member_at(26)['rank'].should.equal(26)
self.leaderboard.member_at(50)['rank'].should.equal(50)
self.leaderboard.member_at(51).should.equal(None)
self.leaderboard.member_at(1, with_member_data=True)['member_data'].should.eql(
str({'member_name': 'Leaderboard member 50'}))
self.leaderboard.member_at(-5).should.equal(None)
def test_around_me(self):
        self.__rank_members_in_leaderboard(
            Leaderboard.DEFAULT_PAGE_SIZE * 3 + 2)
        self.leaderboard.total_members().should.be(
            Leaderboard.DEFAULT_PAGE_SIZE * 3 + 1)
leaders_around_me = self.leaderboard.around_me('member_30')
(len(leaders_around_me) // 2).should.equal(self.leaderboard.page_size // 2)
leaders_around_me = self.leaderboard.around_me('member_1')
len(leaders_around_me).should.equal(self.leaderboard.page_size // 2 + 1)
leaders_around_me = self.leaderboard.around_me('member_76')
(len(leaders_around_me) // 2).should.equal(self.leaderboard.page_size // 2)
leaders_around_me = self.leaderboard.around_me('member_76', page_size=1)
(len(leaders_around_me) // 2).should.equal(0)
def test_members_only(self):
exp = [{'member': 'member_%d' % x} for x in reversed(range(1, 27))]
self.__rank_members_in_leaderboard(27)
leaders = self.leaderboard.leaders(1, members_only=True)
leaders.should.equal(exp[0:25])
leaders = self.leaderboard.leaders(2, members_only=True)
leaders.should.equal(exp[25:26])
members = self.leaderboard.all_leaders(members_only=True)
members.should.equal(exp)
members = self.leaderboard.members_from_score_range(
10,
15,
members_only=True)
members.should.equal(exp[11:17])
members = self.leaderboard.members_from_rank_range(
5,
9,
members_only=True)
members.should.equal(exp[4:9])
leaders_around_me = self.leaderboard.around_me(
'member_25',
page_size=3,
members_only=True)
leaders_around_me.should.equal(exp[0:3])
def test_merge_leaderboards(self):
foo_leaderboard = Leaderboard('foo')
bar_leaderboard = Leaderboard('bar')
foo_leaderboard.rank_member('foo_1', 1)
foo_leaderboard.rank_member('foo_2', 2)
bar_leaderboard.rank_member('bar_1', 1)
bar_leaderboard.rank_member('bar_2', 2)
bar_leaderboard.rank_member('bar_3', 5)
foo_leaderboard.merge_leaderboards('foobar', ['bar'], aggregate='SUM')
foobar_leaderboard = Leaderboard('foobar')
foobar_leaderboard.total_members().should.equal(5)
foobar_leaderboard.leaders(1)[0]['member'].should.equal('bar_3')
def test_intersect_leaderboards(self):
foo_leaderboard = Leaderboard('foo')
bar_leaderboard = Leaderboard('bar')
foo_leaderboard.rank_member('foo_1', 1)
foo_leaderboard.rank_member('foo_2', 2)
foo_leaderboard.rank_member('bar_3', 6)
bar_leaderboard.rank_member('bar_1', 3)
bar_leaderboard.rank_member('foo_1', 4)
bar_leaderboard.rank_member('bar_3', 5)
foo_leaderboard.intersect_leaderboards(
'foobar',
['bar'],
aggregate='SUM')
foobar_leaderboard = Leaderboard('foobar')
foobar_leaderboard.total_members().should.equal(2)
foobar_leaderboard.leaders(1)[0]['member'].should.equal('bar_3')
def test_rank_member_if(self):
def highscore_check(
self,
member,
current_score,
score,
member_data,
leaderboard_options):
if (current_score is None):
return True
if (score > current_score):
return True
return False
self.leaderboard.total_members().should.equal(0)
self.leaderboard.rank_member_if(highscore_check, 'david', 1337)
self.leaderboard.total_members().should.equal(1)
self.leaderboard.score_for('david').should.equal(1337.0)
self.leaderboard.rank_member_if(highscore_check, 'david', 1336)
self.leaderboard.score_for('david').should.equal(1337.0)
self.leaderboard.rank_member_if(highscore_check, 'david', 1338)
self.leaderboard.score_for('david').should.equal(1338.0)
def test_rank_members(self):
self.leaderboard.total_members().should.equal(0)
self.leaderboard.rank_members(['member_1', 1000, 'member_2', 3000])
self.leaderboard.total_members().should.equal(2)
def test_rank_member_across(self):
self.leaderboard.rank_member_across(
['highscores', 'more_highscores'], 'david', 50000, {'member_name': 'david'})
len(self.leaderboard.leaders_in('highscores', 1)).should.equal(1)
len(self.leaderboard.leaders_in('more_highscores', 1)).should.equal(1)
def test_custom_keys_for_member_score_rank_and_member_data(self):
Leaderboard.MEMBER_KEY = 'member_custom'
Leaderboard.SCORE_KEY = 'score_custom'
Leaderboard.RANK_KEY = 'rank_custom'
Leaderboard.MEMBER_DATA_KEY = 'member_data_custom'
self.__rank_members_in_leaderboard(26)
leaders = self.leaderboard.leaders(1, with_member_data=True)
len(leaders).should.equal(25)
leaders[0]['member_custom'].should.equal('member_25')
leaders[0]['score_custom'].should.equal(25.0)
leaders[0]['rank_custom'].should.equal(1)
leaders[0]['member_data_custom'].should.equal(
"{'member_name': 'Leaderboard member 25'}")
def test_can_use_StrictRedis_class_for_connection(self):
lb = Leaderboard('lb1', connection=StrictRedis(db=0))
lb.rank_member('david', 50.1)
lb.score_for('david').should.equal(50.1)
lb.rank_for('david').should.equal(1)
len(lb.leaders(1)).should.equal(1)
def test_can_set_member_data_namespace_option(self):
self.leaderboard = Leaderboard('name', member_data_namespace='md')
self.__rank_members_in_leaderboard()
self.leaderboard.redis_connection.exists(
"name:member_data").should.be.false
self.leaderboard.redis_connection.exists("name:md").should.be.true
def test_global_member_data_option(self):
self.leaderboard = Leaderboard('name', global_member_data=True)
self.__rank_members_in_leaderboard()
self.leaderboard.redis_connection.exists(
"name:member_data").should.be.false
self.leaderboard.redis_connection.exists(
"member_data").should.be.true
def test_retrieve_a_given_set_of_members_from_the_leaderboard_in_a_range_from_1_to_the_number_given(self):
self.__rank_members_in_leaderboard(26)
members = self.leaderboard.top(5)
len(members).should.equal(5)
members[0]['member'].should.equal('member_25')
members[0]['score'].should.equal(25.0)
members[4]['member'].should.equal('member_21')
members = self.leaderboard.top(1)
len(members).should.equal(1)
members[0]['member'].should.equal('member_25')
members = self.leaderboard.top(26)
len(members).should.equal(25)
members[0]['member'].should.equal('member_25')
members[0]['score'].should.equal(25.0)
members[24]['member'].should.equal('member_1')
def test_retrieve_a_given_set_of_members_from_the_named_leaderboard_in_a_range_from_1_to_the_number_given(self):
self.__rank_members_in_leaderboard(26)
members = self.leaderboard.top_in('name', 5)
len(members).should.equal(5)
members[0]['member'].should.equal('member_25')
members[0]['score'].should.equal(25.0)
members[4]['member'].should.equal('member_21')
members = self.leaderboard.top_in('name', 1)
len(members).should.equal(1)
members[0]['member'].should.equal('member_25')
members = self.leaderboard.top_in('name', 26)
len(members).should.equal(25)
members[0]['member'].should.equal('member_25')
members[0]['score'].should.equal(25.0)
members[24]['member'].should.equal('member_1')
def test_retrieve_a_given_set_of_members_from_the_leaderboard_in_a_range_from_1_to_the_number_given_with_sort_option_ASC(self):
self.leaderboard.order = Leaderboard.ASC
self.__rank_members_in_leaderboard(26)
members = self.leaderboard.top(5)
len(members).should.equal(5)
members[0]['member'].should.equal('member_1')
members[0]['score'].should.equal(1.0)
members[4]['member'].should.equal('member_5')
members = self.leaderboard.top(1)
len(members).should.equal(1)
members[0]['member'].should.equal('member_1')
members = self.leaderboard.top(26)
len(members).should.equal(25)
members[0]['member'].should.equal('member_1')
members[0]['score'].should.equal(1.0)
members[24]['member'].should.equal('member_25')
def test_retrieve_a_given_set_of_members_from_the_named_leaderboard_in_a_range_from_1_to_the_number_given_with_sort_option_ASC(self):
self.leaderboard.order = Leaderboard.ASC
self.__rank_members_in_leaderboard(26)
members = self.leaderboard.top_in('name', 5)
len(members).should.equal(5)
members[0]['member'].should.equal('member_1')
members[0]['score'].should.equal(1.0)
members[4]['member'].should.equal('member_5')
members = self.leaderboard.top_in('name', 1)
len(members).should.equal(1)
members[0]['member'].should.equal('member_1')
members = self.leaderboard.top_in('name', 26)
len(members).should.equal(25)
members[0]['member'].should.equal('member_1')
members[0]['score'].should.equal(1.0)
members[24]['member'].should.equal('member_25')
def test_allow_you_to_include_or_exclude_missing_members_using_the_include_missing_option(self):
self.__rank_members_in_leaderboard(26)
leaders = self.leaderboard.ranked_in_list(
['member_1', 'member_15', 'member_25', 'member_200'])
len(leaders).should.equal(4)
leaders[0]['member'].should.equal('member_1')
leaders[1]['member'].should.equal('member_15')
leaders[2]['member'].should.equal('member_25')
leaders[3]['member'].should.equal('member_200')
leaders = self.leaderboard.ranked_in_list(
['member_1', 'member_15', 'member_25', 'member_200'], include_missing=False)
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_1')
leaders[1]['member'].should.equal('member_15')
leaders[2]['member'].should.equal('member_25')
def test_total_scores_in(self):
self.__rank_members_in_leaderboard(26)
self.leaderboard.total_scores().should.equal(325.0)
def test_ranked_in_list_with_include_missing_sort_by_rank_and_missing_members(self):
self.__rank_members_in_leaderboard(27)
leaders = self.leaderboard.ranked_in_list(
['member_1', 'member_81', 'member_25'], sort_by='rank')
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_25')
leaders[1]['member'].should.equal('member_1')
leaders[2]['member'].should.equal('member_81')
self.leaderboard.order = Leaderboard.ASC
leaders = self.leaderboard.ranked_in_list(
['member_1', 'member_81', 'member_25'], sort_by='rank')
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_81')
leaders[1]['member'].should.equal('member_1')
leaders[2]['member'].should.equal('member_25')
def test_ranked_in_list_with_include_missing_sort_by_score_and_missing_members(self):
self.__rank_members_in_leaderboard(27)
leaders = self.leaderboard.ranked_in_list(
['member_1', 'member_81', 'member_25'], sort_by='score')
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_1')
leaders[1]['member'].should.equal('member_25')
leaders[2]['member'].should.equal('member_81')
self.leaderboard.order = Leaderboard.ASC
leaders = self.leaderboard.ranked_in_list(
['member_1', 'member_81', 'member_25'], sort_by='rank')
len(leaders).should.equal(3)
leaders[0]['member'].should.equal('member_81')
leaders[1]['member'].should.equal('member_1')
leaders[2]['member'].should.equal('member_25')
def test_ranked_in_list_with_include_missing_sort_by_score_and_negative_and_zero_score(self):
self.__rank_members_in_leaderboard()
self.leaderboard.rank_member('member_-1', -1)
self.leaderboard.rank_member('member_0', 0)
leaders = self.leaderboard.ranked_in_list(
['member_-1', 'member_0', 'member_1', 'member_3', 'member_200'], sort_by='score')
len(leaders).should.equal(5)
leaders[0]['member'].should.equal('member_-1')
leaders[1]['member'].should.equal('member_0')
leaders[2]['member'].should.equal('member_1')
leaders[3]['member'].should.equal('member_3')
leaders[4]['member'].should.equal('member_200')
    def __rank_members_in_leaderboard(self, members_to_add=6):
        for index in range(1, members_to_add):
            self.leaderboard.rank_member(
                'member_%s' % index,
                index,
                {'member_name': 'Leaderboard member %s' % index})
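# ---------------------------------------------------------------------------
# Usage sketch (illustrative; assumes a Redis server on localhost:6379, the
# same requirement as the tests above).
if __name__ == '__main__':
    lb = Leaderboard('demo_highscores', decode_responses=True)
    lb.rank_member('alice', 100, {'member_name': 'Alice'})
    lb.rank_member('bob', 250)
    print(lb.leaders(1))         # page 1, highest score first
    print(lb.rank_for('alice'))  # -> 2
    lb.delete_leaderboard()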
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
from __future__ import print_function
import os, glob, optparse, re, shutil, subprocess, sys, string, time
libexecpath = os.path.abspath(sys.path[0] + '/../libexec')
sys.path.insert(0, libexecpath) # prepend to Python path
from stereo_utils import get_asp_version
import asp_system_utils
asp_system_utils.verify_python_version_is_supported()
job_pool = [];
# Global output folder variable
outputFolder = ""
def man(option, opt, value, parser):
print(parser.usage, file=sys.stderr)
print('''\
This program operates on LRO (.IMG) files, and performs the
following ISIS 3 operations:
* Converts to ISIS format (lronac2isis)
* Attaches SPICE information (spiceinit and spicefit)
* Performs radiometric calibration (lronaccal)
* lronacecho?
* Removes camera distortions from the CCD images (noproj)
* Performs jitter analysis (lrojitreg)
* Mosaics individual CCDs into one unified image file (handmos)
* Normalizes the mosaic (cubenorm)
''', file=sys.stderr)
sys.exit()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def add_job( cmd, num_working_threads=4 ):
if ( len(job_pool) >= num_working_threads):
job_pool[0].wait();
job_pool.pop(0);
print(cmd)
job_pool.append( subprocess.Popen(cmd, shell=True, env=os.environ) );
def wait_on_all_jobs():
print("Waiting for jobs to finish")
while len(job_pool) > 0:
job_pool[0].wait();
job_pool.pop(0);
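# Example (illustrative): add_job() keeps at most num_working_threads commands
# running at once and wait_on_all_jobs() blocks until the pool drains, e.g.
#   add_job('lronac2isis from=a.IMG to=a.cub', 4)
#   wait_on_all_jobs()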
# Go through a list of cubes and sort them into left/right pairs
def build_cube_pairs(cubePaths):
pairDict = dict();
for cube in cubePaths:
print(cube)
        m = re.search(r'\D*(\d+)(.).*', os.path.basename(cube))
number = m.group(1)
sideLetter = m.group(2)
if (number not in pairDict):
pairDict[number] = ['', ''];
# Store the path in the spot for either the left or right cube
if (sideLetter == "L"):
pairDict[number][0] = cube; # Left
else:
pairDict[number][1] = cube; # Right
return pairDict
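# Example (hypothetical filenames):
#   build_cube_pairs(['M104318871LE.cub', 'M104318871RE.cub'])
#   -> {'104318871': ['M104318871LE.cub', 'M104318871RE.cub']}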
def read_flatfile( flat ):
# Fail if the input file is not present
if not os.path.isfile(flat):
raise Exception('File ' + flat + ' is missing!')
averages = [0.0,0.0]
f = open(flat,'r')
for line in f:
if ( line.rfind("Average Sample Offset:") >= 0 ):
index = line.rfind("Offset:");
index_e = line.rfind("StdDev:");
crop = line[index+7:index_e];
if crop == " NULL ": # Check for null value
raise Exception('Null sample offset in file ' + flat)
averages[0] = float(crop);
elif ( line.rfind("Average Line Offset:") >= 0 ):
index = line.rfind("Offset:");
index_e = line.rfind("StdDev:");
crop = line[index+7:index_e];
if crop == " NULL ": # Check for null value
                raise Exception('Null line offset in file ' + flat)
averages[1] = float(crop);
elif ( line.rfind("Using IpFind result only:") >= 0 ):
index = line.rfind("only:");
            if (line[index + 7] == '1'):
print("Warning: This result based only on IpFind search.")
print(str(averages))
return averages
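# Example (hypothetical lronacjitreg log content parsed by read_flatfile):
#   Average Sample Offset: 2.3625 StdDev: 0.105
#   Average Line Offset: -0.5187 StdDev: 0.214
# -> returns [2.3625, -0.5187]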
# Call lronac2isis on each input file, return list of output files.
def lronac2isis( img_files, threads, outputFolder ):
lronac2isis_cubs = []
for img in img_files:
# Expect to end in .IMG, change to end in .cub and move to output folder
newExtension = os.path.splitext(img)[0] + '.cub'
cubFilePath = os.path.join(outputFolder, os.path.basename(newExtension))
if( os.path.exists(cubFilePath) ):
print(cubFilePath + ' exists, skipping lronac2isis.')
else:
cmd = 'lronac2isis from='+ img +' to='+ cubFilePath
add_job(cmd, threads)
lronac2isis_cubs.append( cubFilePath )
wait_on_all_jobs()
return lronac2isis_cubs
# Call lronaccal on each input file, return list of output files.
def lronaccal( cub_files, threads, delete=False ):
lronaccal_cubs = []
for cub in cub_files:
# Expect to end in .cub, change to end in .lronaccal.cub
to_cub = os.path.splitext(cub)[0] + '.lronaccal.cub'
if( os.path.exists(to_cub) ):
print(to_cub + ' exists, skipping lronaccal.')
else:
cmd = 'lronaccal from='+ cub +' to='+ to_cub
add_job(cmd, threads)
lronaccal_cubs.append( to_cub )
wait_on_all_jobs()
if( delete ): # Delete all input .cub files and log files
for cub in cub_files:
os.remove( cub )
lronaccal_log_files = glob.glob( os.path.commonprefix(cub_files) + '*.lronaccal.log' )
for file in lronaccal_log_files:
os.remove( file )
return lronaccal_cubs
# Call lronacecho on each input file, return list of output files.
def lronacecho( cub_files, threads, delete=False ):
lronacecho_cubs = []
for cub in cub_files:
# Expect to end in .cub, change to end in .lronaccal.cub
to_cub = os.path.splitext(cub)[0] + '.lronacecho.cub'
if( os.path.exists(to_cub) ):
print(to_cub + ' exists, skipping lronacecho.')
else:
cmd = 'lronacecho from='+ cub +' to='+ to_cub
add_job(cmd, threads)
lronacecho_cubs.append( to_cub )
wait_on_all_jobs()
if( delete ): # Delete all input .cub files and log files
for cub in cub_files:
os.remove( cub )
return lronacecho_cubs
def spice( cub_files, threads):
for cub in cub_files:
cmd = 'spiceinit web=false from='+ cub
add_job(cmd, threads)
wait_on_all_jobs()
for cub in cub_files:
cmd = 'spicefit from='+ cub
add_job(cmd, threads)
wait_on_all_jobs()
return
# Returns true if the .cub LRONAC file has CROSSTRACK_SUMMING = 1
def isFileHalfRes(cubFilePath):
return False; # It looks like the normal pvl file works so use it in all cases
f = open(cubFilePath, 'r')
for line in f:
if ( line.rfind("CROSSTRACK_SUMMING") >= 0 ):
index = line.rfind("=");
crop = line[index+2];
result = (crop == "2")
f.close()
return result;
# Left file is in index 0, right is in index 1
def noproj( file_pairs, threads, delete, fakePvl, outputFolder):
if fakePvl: # Generate temporary PVL files containing LRONAC definition
# - We need one for full-res mode, one for half-X-res mode.
fullResFilePath = os.path.join(outputFolder, 'noprojInstruments_fullRes.pvl')
if os.path.exists(fullResFilePath):
print(fullResFilePath + ' exists, using existing file.')
else: # Need to write the file
print('Generating LRONAC compatible .pvl file ' + fullResFilePath)
f = open(fullResFilePath, 'w')
f.write('Object = IdealInstrumentsSpecifications\n');
f.write(' UserName = auto\n');
f.write(' Created = 2013-07-18T13:42:00\n');
f.write(' LastModified = 2013-07-18T13:42:00\n\n');
f.write(' Group = "LUNAR RECONNAISSANCE ORBITER/NACL"\n');
f.write(' TransY = 16.8833\n')
f.write(' ItransS = -2411.9\n')
f.write(' TransX = 0.6475\n')
f.write(' ItransL = -92.5\n')
f.write(' DetectorSamples = 10000\n')
f.write(' End_Group\n\n')
f.write('End_Object\n')
f.write('End')
f.close()
halfResFilePath = os.path.join(outputFolder, 'noprojInstruments_halfRes.pvl')
if os.path.exists(halfResFilePath):
print(halfResFilePath + ' exists, using existing file.')
else: # Need to write the file
print('Generating LRONAC compatible .pvl file ' + halfResFilePath)
f = open(halfResFilePath, 'w')
f.write('Object = IdealInstrumentsSpecifications\n');
f.write(' UserName = auto\n');
f.write(' Created = 2013-07-18T13:42:00\n');
f.write(' LastModified = 2013-07-18T13:42:00\n\n');
f.write(' Group = "LUNAR RECONNAISSANCE ORBITER/NACL"\n');
f.write(' TransY = 16.8833\n')
f.write(' ItransS = -4823.8\n') # Halved
f.write(' TransX = 0.6475\n')
f.write(' ItransL = -185\n') # Halved
f.write(' DetectorSamples = 5000\n') # Halved
f.write(' End_Group\n\n')
f.write('End_Object\n')
f.write('End')
f.close()
noproj_pairs = dict();
for k, v in file_pairs.items():
noproj_pairs[k] = ['', ''];
for i in range(2): # Process left and right image
to_cub = os.path.splitext(v[i])[0] + '.noproj.cub'
noproj_pairs[k][i] = to_cub; # Add file to output list
if os.path.exists( to_cub ):
print(to_cub + ' exists, skipping noproj.')
else:
# Generate pvl command if needed
if fakePvl:
fileIsHalfRes = isFileHalfRes(v[0])
if fileIsHalfRes:
specsLine = ' specs=' + os.path.abspath(halfResFilePath) + ' ';
else: # Full resolution
specsLine = ' specs=' + os.path.abspath(fullResFilePath) + ' ';
else: # Use the default file
specsLine = '';
# Multiple noproj threads will create clashing temporary files
# so we need to make temporary directories to run each thread in.
tempDir = 'temp_' + str(k) + '_' + str(i)
tempDir = os.path.join(outputFolder, tempDir)
cmd = 'mkdir -p ' + tempDir + ' && ' \
+ 'cd ' + tempDir + ' && ' \
+ 'noproj from=' + os.path.abspath(v[i]) \
+ ' match=' + os.path.abspath(v[0]) \
+ specsLine \
+ ' to=' + os.path.abspath(to_cub) + ' && ' \
+ 'cd .. && rm -rf ' + tempDir
add_job(cmd, threads)
wait_on_all_jobs()
if( delete ): # Clean up input cube files
for v in file_pairs.values():
os.remove( v[0] );
os.remove( v[1] );
# if fakePvl: # These are not deleted in case this program is running in multiple threads
# os.remove( halfResFilePath );
# os.remove( fullResFilePath );
return noproj_pairs;
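
# Shape note (inferred from the code above; paths are hypothetical):
#   file_pairs   = {0: ['M123L.lronacecho.cub', 'M123R.lronacecho.cub']}
#   noproj_pairs = {0: ['M123L.lronacecho.noproj.cub', 'M123R.lronacecho.noproj.cub']}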
def lronacjitreg( noproj_pairs, threads, delete=False ):
boundsCommands = '--correlator-type 2 --kernel 15 15'
for k,v in noproj_pairs.items():
cmd = 'lronacjitreg ' + boundsCommands \
+ ' --output-log outputLog_'+str(k)+'.txt' \
+ ' '+ v[0] \
+ ' '+ v[1];
add_job(cmd, threads)
wait_on_all_jobs()
# Read in all the shift values from the output text files
averages = dict()
for k,v in noproj_pairs.items():
flat_file = 'outputLog_'+str(k)+'.txt'
print('Reading log file ' + flat_file)
averages[k] = read_flatfile( flat_file )
if delete:
os.remove( flat_file )
return averages
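
# read_flatfile() (defined elsewhere) is assumed, from the usage in mosaic() below,
# to return the mean (x, y) pixel shift parsed from the log, so averages[k] behaves
# like an (xShift, yShift) pair.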
def mosaic( noproj_pairs, averages, threads ):
mosaicList = dict();
for k,v in noproj_pairs.items():
# Create mosaic output file
mosaicPath = os.path.splitext(v[0])[0] + '.mosaic.cub'
shutil.copy( v[0], mosaicPath ) # Copy the LE image to the output path
xOffset = -1*averages[k][0] # Sign convention changes here
yOffset = -1*averages[k][1]
handmos( v[1], mosaicPath,
str( int(round( xOffset )) ),
str( int(round( yOffset )) ),
threads )
mosaicList[k] = mosaicPath;
wait_on_all_jobs()
return mosaicList
def handmos( fromcub, tocub, outsamp, outline, threads ):
    cmd = 'handmos from='+ fromcub +' mosaic='+ tocub \
        +' outsample='+ str(outsamp) \
        +' outline='+ str(outline) \
        +' matchbandbin=FALSE priority=ontop'
add_job(cmd, threads);
return
def cubenorm( mosaicList, threads, delete=False ):
normedList = dict();
for k,v in mosaicList.items():
normedPath = os.path.splitext(v)[0] + '.norm.cub'
cmd = 'cubenorm from='+ v +' to='+ normedPath
add_job(cmd, threads);
normedList[k] = normedPath;
wait_on_all_jobs()
if( delete ): # Clean up input cube files
for v in mosaicList.values():
os.remove(v);
return normedList
def cropInputs(inputFiles, outputFolder, cropAmount, threads, delete=False):
outputPaths = []
for path in inputFiles:
# Expect to end in .IMG, change to end in .cub and move to output folder
newExtension = os.path.splitext(path)[0] + '.cropped.cub'
croppedPath = os.path.join(outputFolder, os.path.basename(newExtension))
cmd = 'crop from='+ path +' to='+ croppedPath + ' nlines=' + str(cropAmount)
add_job(cmd, threads)
outputPaths.append( croppedPath )
wait_on_all_jobs()
if delete:
for path in inputFiles:
os.remove(path)
return outputPaths
#--------------------------------------------------------------------------------
#TODO: Support for file based logging of results
def main():
try:
try:
usage = "usage: lronac2mosaic.py [--help][--manual][--crop][--threads N]" \
"[--keep] LRONAC.IMG-files\n " + get_asp_version()
parser = optparse.OptionParser(usage=usage)
parser.set_defaults(delete =True)
parser.set_defaults(cropAmount=0)
parser.set_defaults(threads=4)
parser.set_defaults(fakePvl=True)
parser.add_option("--manual", action="callback", callback=man,
help="Read the manual.")
parser.add_option("-o", "--output-dir", dest="outputFolder",
help="Output folder (default to input folder).",type="string")
parser.add_option("--stop-at-no-proj", dest="stop_no_proj", action="store_true",
help="Process the IMG files only to have SPICE attached.")
parser.add_option("--resume-at-no-proj", dest="resume_no_proj", action="store_true",
help="Pick back up after spiceinit has happened. This was noproj uses your new camera information")
parser.add_option("-c", "--crop", dest="cropAmount",
help="Process only the first N lines of the image.",type="int")
parser.add_option("-t", "--threads", dest="threads",
help="Number of threads to use.",type="int")
parser.add_option("-k", "--keep", action="store_false",
dest="delete",
help="Will not delete intermediate files.")
parser.add_option("--p", dest="fakePvl", action="store_true",
help="Don't automatically create a LRONAC pvl file")
(options, args) = parser.parse_args()
if not args: parser.error("need .IMG files")
except optparse.OptionError as msg:
raise Usage(msg)
# Make sure only one pair of cubes was passed in
input_file_pair = build_cube_pairs( args )
if len(input_file_pair) > 1:
            raise Usage('Input error: Only one pair of input files is allowed!')
if not options.outputFolder: # Set the output folder equal to the input folder
options.outputFolder = os.path.dirname(args[0])
print('Using output folder: ' + options.outputFolder)
if not os.path.exists(options.outputFolder) and len(options.outputFolder) > 1:
os.makedirs(options.outputFolder)
print("Beginning processing.....")
if not options.resume_no_proj: # If not skipping to later point
print("lronac2isis") # Per-file operation, returns list of new files
lronac2isised = lronac2isis( args, options.threads, options.outputFolder )
print("lronaccal") # Per-file operation, returns list of new files
lronaccaled = lronaccal( lronac2isised, options.threads, options.delete )
print("lronacecho") # Per-file operation, returns list of new files
lronacechod = lronacecho( lronaccaled, options.threads, options.delete )
if (options.cropAmount > 0): # Crop the input files as soon as ISIS calls allow it
lronacechod = cropInputs(lronacechod, options.outputFolder, options.cropAmount,
options.threads, options.delete)
print("spice") # Attach spice info to cubes (adds to existing files)
spice( lronacechod, options.threads )
if options.stop_no_proj: # Stop early if requested
print("Finished")
return 0
if options.resume_no_proj: # If resume option was set
lronacechod = args
print("build_cube_pairs") # Detected corresponding pairs of cubes
lronac_file_pairs = build_cube_pairs( lronacechod )
print("noproj") # Per-file operation
noprojed_file_pairs = noproj( lronac_file_pairs, options.threads, options.delete, options.fakePvl, options.outputFolder)
print("lronacjitreg") # Determines mean shift for each file pair
averages = lronacjitreg( noprojed_file_pairs, options.threads, options.delete )
print("mosaic") # handmos - Use mean shifts to combine the file pairs
mosaicked = mosaic( noprojed_file_pairs, averages, options.threads )
# Clean up noproj files
if( options.delete ):
for cub in noprojed_file_pairs.values():
os.remove( cub[0] )
os.remove( cub[1] )
# Run a final cubenorm across the image:
cubenorm( mosaicked, options.threads, options.delete )
print("Finished")
return 0
except Usage as err:
print(err.msg, file=sys.stderr)
return 2
# To more easily debug this program, comment out this catch block.
# except Exception as err:
# sys.stderr.write( str(err) + '\n' )
# return 1
if __name__ == "__main__":
sys.exit(main())
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a diagonal matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorDiag",]
@tf_export("linalg.LinearOperatorDiag")
class LinearOperatorDiag(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square diagonal matrix.
This operator acts like a [batch] diagonal matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorDiag` is initialized with a (batch) vector.
```python
# Create a 2 x 2 diagonal linear operator.
diag = [1., -1.]
operator = LinearOperatorDiag(diag)
operator.to_dense()
==> [[1., 0.]
[0., -1.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
diag = tf.random.normal(shape=[2, 3, 4])
operator = LinearOperatorDiag(diag)
# Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible
# since the batch dimensions, [2, 1], are broadcast to
# operator.batch_shape = [2, 3].
y = tf.random.normal(shape=[2, 1, 4, 2])
x = operator.solve(y)
==> operator.matmul(x) = y
```
#### Shape compatibility
  This operator acts on [batch] matrices with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Performance
Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` involves `N * R` multiplications.
* `operator.solve(x)` involves `N` divisions and `N * R` multiplications.
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
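  For intuition, the diagonal structure makes `matmul` and `solve` elementwise
  (a small sketch):

  ```python
  operator = LinearOperatorDiag([2., 4.])
  operator.matmul([[1.], [1.]])
  ==> [[2.], [4.]]
  operator.solve([[2.], [8.]])
  ==> [[1.], [2.]]
  ```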
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
diag,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorDiag"):
r"""Initialize a `LinearOperatorDiag`.
Args:
diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The diagonal of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `diag.dtype` is real, this is auto-set to `True`.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
TypeError: If `diag.dtype` is not an allowed type.
ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
"""
parameters = dict(
diag=diag,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
with ops.name_scope(name, values=[diag]):
self._diag = linear_operator_util.convert_nonref_to_tensor(
diag, name="diag")
self._check_diag(self._diag)
# Check and auto-set hints.
if not self._diag.dtype.is_complex:
if is_self_adjoint is False:
raise ValueError("A real diagonal operator is always self adjoint.")
else:
is_self_adjoint = True
if is_square is False:
raise ValueError("Only square diagonal operators currently supported.")
is_square = True
super(LinearOperatorDiag, self).__init__(
dtype=self._diag.dtype,
graph_parents=None,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents([self._diag])
def _check_diag(self, diag):
"""Static check of diag."""
if diag.shape.ndims is not None and diag.shape.ndims < 1:
raise ValueError("Argument diag must have at least 1 dimension. "
"Found: %s" % diag)
def _shape(self):
# If d_shape = [5, 3], we return [5, 3, 3].
d_shape = self._diag.shape
return d_shape.concatenate(d_shape[-1:])
def _shape_tensor(self):
d_shape = array_ops.shape(self._diag)
k = d_shape[-1]
return array_ops.concat((d_shape, [k]), 0)
@property
def diag(self):
return self._diag
def _assert_non_singular(self):
return linear_operator_util.assert_no_entries_with_modulus_zero(
self._diag,
message="Singular operator: Diagonal contained zero values.")
def _assert_positive_definite(self):
if self.dtype.is_complex:
message = (
"Diagonal operator had diagonal entries with non-positive real part, "
"thus was not positive definite.")
else:
message = (
"Real diagonal operator had non-positive diagonal entries, "
"thus was not positive definite.")
return check_ops.assert_positive(
math_ops.real(self._diag),
message=message)
def _assert_self_adjoint(self):
return linear_operator_util.assert_zero_imag_part(
self._diag,
message=(
"This diagonal operator contained non-zero imaginary values. "
" Thus it was not self-adjoint."))
def _matmul(self, x, adjoint=False, adjoint_arg=False):
diag_term = math_ops.conj(self._diag) if adjoint else self._diag
x = linalg.adjoint(x) if adjoint_arg else x
diag_mat = array_ops.expand_dims(diag_term, -1)
return diag_mat * x
def _matvec(self, x, adjoint=False):
diag_term = math_ops.conj(self._diag) if adjoint else self._diag
return diag_term * x
def _determinant(self):
return math_ops.reduce_prod(self._diag, axis=[-1])
def _log_abs_determinant(self):
log_det = math_ops.reduce_sum(
math_ops.log(math_ops.abs(self._diag)), axis=[-1])
if self.dtype.is_complex:
log_det = math_ops.cast(log_det, dtype=self.dtype)
return log_det
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
diag_term = math_ops.conj(self._diag) if adjoint else self._diag
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1)
return rhs * inv_diag_mat
def _to_dense(self):
return array_ops.matrix_diag(self._diag)
def _diag_part(self):
return self.diag
def _add_to_tensor(self, x):
x_diag = array_ops.matrix_diag_part(x)
new_diag = self._diag + x_diag
return array_ops.matrix_set_diag(x, new_diag)
def _eigvals(self):
return ops.convert_to_tensor_v2_with_dispatch(self.diag)
def _cond(self):
abs_diag = math_ops.abs(self.diag)
return (math_ops.reduce_max(abs_diag, axis=-1) /
math_ops.reduce_min(abs_diag, axis=-1))
|
|
#!/usr/bin/env python
#
#pylint: disable=line-too-long,invalid-name,missing-docstring,redefined-outer-name,too-many-arguments,too-many-locals,too-many-statements,logging-format-interpolation
#
import os
import sys
import time
import json
import subprocess
import logging as log
def setup_logging(loglevel=log.INFO):
"""initialize logging.
Currently only logging to stdout and not directly to the journal to avoid double-logging
since this script is intended to run from an ExecStartPost= of a systemd unit, its stdout
will already be aggregated with that unit's journal logs, and it makes sense to combine this
script's logs with the container unit's logs rather than making them separate.
This uses the standard python logger instead of titan.pantheon.twistedLog for future
portability and because we are not logging to the journal directly.
"""
log.basicConfig(level=loglevel, format='%(levelname)-8s: %(message)s')
def is_ipv4(addr):
"""return true if addr looks like an ipv4 address, false otherwise"""
if addr == '0/0' or '.' in addr:
return True
else:
return False
def is_ipv6(addr):
"""return true if addr looks like an ipv6 address, false otherwise"""
if addr == '0/0' or ':' in addr:
return True
else:
return False
def docker_inspect(container_name, max_attempts=20):
"""Runs `docker inspect <container_name>` and parses its json output, returning a python dict.
raises subprocess.CalledProcessError on non-zero exit status from docker
"""
attempts = 0
success = False
while attempts < max_attempts and not success:
try:
result = subprocess.check_output('docker inspect {}'.format(container_name),
stderr=subprocess.STDOUT,
shell=True)
success = True
except subprocess.CalledProcessError:
time.sleep(0.1) # 100ms!!
attempts += 1
if success:
return json.loads(result)[0]
else:
raise RuntimeError('Retries exhausted waiting for {} to become available'.format(container_name))
def wait_until_running(container_name, max_attempts=20):
attempts = 0
success = False
while attempts < max_attempts and not success:
container_data = docker_inspect(container_name)
if container_data['State']['Running'] is True:
return True
time.sleep(0.1)
attempts += 1
raise RuntimeError('timed out waiting for {} to enter running state.'.format(container_name))
def create_ipv4_nat_rule(chain, bridge, proto, host_port, container_ip, container_port):
"""return a iptables v4 nat rule for forwarding a host port to a container IP:port"""
return '-A {chain} ! -i {bridge} -p {proto} -m {proto}' \
' --dport {host_port} -j DNAT' \
' --to-destination {container_ip}:{container_port}'.format(chain=chain,
bridge=bridge,
proto=proto,
host_port=host_port,
container_ip=container_ip,
container_port=container_port)
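
# Example of a generated rule (hypothetical values):
#   create_ipv4_nat_rule('DOCKER_CONTAINERS', 'docker0', 'tcp', 5001, '172.17.0.2', 5000)
#   ==> '-A DOCKER_CONTAINERS ! -i docker0 -p tcp -m tcp --dport 5001 -j DNAT'
#       ' --to-destination 172.17.0.2:5000'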
def create_ipv4_filter_rule(container_ip, bridge, proto, container_port):
"""return a iptables v4 filter rule for forwarding a host port to a container IP:port"""
return '-A FORWARD -d {container_ip} ! -i {bridge} -o {bridge}' \
' -p {proto} -m {proto} --dport {container_port}'\
' -j ACCEPT\n'.format(container_ip=container_ip,
bridge=bridge,
proto=proto,
container_port=container_port)
def create_ipv6_nat_rule(chain, bridge, proto, host_port, container_ip, container_port):
"""return a iptables v6 nat rule for forwarding a host port to a container IP:port"""
return '-A {chain} ! -i {bridge} -p {proto} -m {proto}' \
' --dport {host_port} -j DNAT' \
' --to-destination [{container_ip}]:{container_port}'.format(chain=chain,
bridge=bridge,
proto=proto,
host_port=host_port,
container_ip=container_ip,
container_port=container_port)
def create_ipv6_filter_rule(container_ip, bridge, proto, container_port):
"""return a iptables v4 filter rule for forwarding a host port to a container IP:port"""
return '-A FORWARD -d {container_ip} ! -i {bridge} -o {bridge}' \
' -p {proto} -m {proto} --dport {container_port}'\
' -j ACCEPT\n'.format(container_ip=container_ip,
bridge=bridge,
proto=proto,
container_port=container_port)
def write_iptables_file(filename, nat_rules, filter_rules):
with open(filename, 'w') as f:
f.write('*nat\n')
for rule in nat_rules:
f.write(rule + '\n')
f.write('COMMIT\n')
f.write('*filter\n')
for rule in filter_rules:
f.write(rule + '\n')
f.write('COMMIT\n')
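
# The written file follows iptables-restore layout, e.g. (hypothetical rules):
#   *nat
#   -A DOCKER_CONTAINERS ... -j DNAT --to-destination 172.17.0.2:5000
#   COMMIT
#   *filter
#   -A FORWARD ... -j ACCEPT
#   COMMIT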
def remove_iptables_file(filename):
if os.path.exists(filename):
os.remove(filename)
def restart_iptables():
subprocess.call('systemctl restart iptables.service', shell=True)
def restart_ip6tables():
subprocess.call('systemctl restart ip6tables.service', shell=True)
def main(args):
"""args should be a Namespace() with following attributes:
- action (string): "create" or "delete"
- container_name (string): name of a docker container
- debug (bool): enable debug logging
- chain (string): iptables name to attach docker container rules onto
- ipv6 (bool): enable generating ip6tables (ipv6) rules in addition to ipv4
- iptables_dir (string): directory containing iptables rules files
- ip6tables_dir (string): directory containing ip6tables rules files
"""
loglevel = log.DEBUG if args.debug else log.INFO
setup_logging(loglevel)
log.debug(args)
container = args.container_name
chain = args.chain
enable_ipv6 = args.ipv6
iptables_file = os.path.join(args.iptables_dir, '11-docker-container_' + container)
ip6tables_file = os.path.join(args.ip6tables_dir, '11-docker-container_' + container)
if args.action == 'create':
nat4_rules = []
filter4_rules = []
nat6_rules = []
filter6_rules = []
try:
wait_until_running(container)
except RuntimeError as e:
log.error('Error occurred while waiting for container to start: {}'.format(e))
sys.exit(1)
try:
container_data = docker_inspect(container)
except RuntimeError as e:
            log.error('Error retrieving container data for container {}: {}'.format(container, e))
sys.exit(1)
network = container_data['NetworkSettings']
container_ip = network['IPAddress']
bridge = network['Bridge']
mappings = network['Ports']
if len(mappings) == 0:
log.info('container {} does not have any port mappings, nothing to do.'.format(container))
sys.exit(0)
for (container_map, host_map) in mappings.iteritems():
# `container_map` example: (String): "5000/tcp"
# `host_map` example: list of single dict: [{u'HostPort': u'5001', u'HostIp': u'0.0.0.0'}]
# host_map may also be None in the case of an EXPOSED port that is not published.
if not host_map:
log.debug('exposed port {} is not published, skipping.'.format(container_map))
continue
(container_port, proto) = container_map.split('/')
host_ip = host_map[0]['HostIp']
host_port = host_map[0]['HostPort']
# convert 0.0.0.0 to 0/0 which is accepted by both iptables and ip6tables
host_ip = '0/0' if host_ip == '0.0.0.0' else host_ip
# iptables (ipv4) rules
if is_ipv4(host_ip):
nat4_rules.append(create_ipv4_nat_rule(chain, bridge, proto, host_port, container_ip, container_port))
filter4_rules.append(create_ipv4_filter_rule(container_ip, bridge, proto, container_port))
# ip6tables (ipv6) rules
# NOTE: ipv6 nat'ing is a weird concept but linux supports it since 3.7+. At this time
# Docker (1.2) does not seem to support it well but we at least want to support
# the case of making our apps available on the host's ipv6 addr, so we try to make
# some rules to publish our services on ipv4 and ipv6.
if enable_ipv6 and is_ipv6(host_ip):
nat6_rules.append(create_ipv6_nat_rule(chain, bridge, proto, host_port, container_ip, container_port))
filter6_rules.append(create_ipv6_filter_rule(container_ip, bridge, proto, container_port))
log.debug('nat4_rules:\n{}\n'.format(nat4_rules))
log.debug('filter4_rules:\n{}\n'.format(filter4_rules))
log.debug('nat6_rules:\n{}\n'.format(nat6_rules))
log.debug('filter6_rules:\n{}\n'.format(filter6_rules))
log.info('Writing iptables rules to {} and initiating iptables reload'.format(iptables_file))
write_iptables_file(iptables_file, nat4_rules, filter4_rules)
restart_iptables()
if enable_ipv6:
            log.info('Writing ip6tables rules to {} and initiating ip6tables reload'.format(ip6tables_file))
write_iptables_file(ip6tables_file, nat6_rules, filter6_rules)
restart_ip6tables()
if args.action == 'delete':
log.info('Deleting iptables rule files: {}, {}'.format(iptables_file, ip6tables_file))
remove_iptables_file(iptables_file)
remove_iptables_file(ip6tables_file)
log.info('reloading iptables rules')
restart_iptables()
if enable_ipv6:
restart_ip6tables()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# required, positional args
parser.add_argument('action', choices=['create', 'delete'])
parser.add_argument('container_name', help='name of container')
# optional, flag based args
parser.add_argument('--iptables-dir', help='Directory to store iptables files', default='/etc/iptables.d')
parser.add_argument('--ip6tables-dir', help='Directory to store ip6tables files', default='/etc/ip6tables.d')
parser.add_argument('--chain', help='Name of docker iptables chain', default='DOCKER_CONTAINERS')
parser.add_argument('--ipv6', help='Enable ip6tables. Experimental. Docker ipv6 support requires the lxc exec driver.', action='store_true')
parser.add_argument('--debug', help='Enable debug logging.', action='store_true')
args = parser.parse_args()
main(args)
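
# Example invocations (script and container names are hypothetical):
#   ./docker-iptables.py create webapp --chain DOCKER_CONTAINERS --ipv6
#   ./docker-iptables.py delete webapp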
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Davis Phillips davis.phillips@gmail.com
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_resource_pool
short_description: Add/remove resource pools to/from vCenter
description:
- This module can be used to add/remove a resource pool to/from vCenter
version_added: 2.3
author:
- Davis Phillips (@dav1x)
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter:
description:
    - Name of the datacenter in which to manage the resource pool.
required: True
type: str
cluster:
description:
    - Name of the cluster in which to manage the resource pool.
required: True
type: str
resource_pool:
description:
- Resource pool name to manage.
required: True
type: str
cpu_expandable_reservations:
description:
- In a resource pool with an expandable reservation, the reservation on a resource pool can grow beyond the specified value.
default: True
type: bool
cpu_reservation:
description:
- Amount of resource that is guaranteed available to the virtual machine or resource pool.
default: 0
type: int
cpu_limit:
description:
- The utilization of a virtual machine/resource pool will not exceed this limit, even if there are available resources.
- The default value -1 indicates no limit.
default: -1
type: int
cpu_shares:
description:
    - CPU shares are used in case of resource contention.
choices:
- high
- custom
- low
- normal
default: normal
type: str
mem_expandable_reservations:
description:
- In a resource pool with an expandable reservation, the reservation on a resource pool can grow beyond the specified value.
default: True
type: bool
mem_reservation:
description:
- Amount of resource that is guaranteed available to the virtual machine or resource pool.
default: 0
type: int
mem_limit:
description:
- The utilization of a virtual machine/resource pool will not exceed this limit, even if there are available resources.
- The default value -1 indicates no limit.
default: -1
type: int
mem_shares:
description:
- Memory shares are used in case of resource contention.
choices:
- high
- custom
- low
- normal
default: normal
type: str
state:
description:
- Add or remove the resource pool
default: 'present'
choices:
- 'present'
- 'absent'
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add resource pool to vCenter
vmware_resource_pool:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: '{{ datacenter_name }}'
cluster: '{{ cluster_name }}'
resource_pool: '{{ resource_pool_name }}'
mem_shares: normal
mem_limit: -1
mem_reservation: 0
mem_expandable_reservations: yes
cpu_shares: normal
cpu_limit: -1
cpu_reservation: 0
cpu_expandable_reservations: yes
state: present
delegate_to: localhost
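
# A matching removal task (sketch; only state differs from the add example above):
- name: Remove resource pool from vCenter
  vmware_resource_pool:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: '{{ datacenter_name }}'
    cluster: '{{ cluster_name }}'
    resource_pool: '{{ resource_pool_name }}'
    state: absent
  delegate_to: localhost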
'''
RETURN = """
instance:
description: metadata about the new resource pool
returned: always
type: dict
sample: None
"""
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.vmware import get_all_objs, connect_to_api, vmware_argument_spec, find_datacenter_by_name, \
find_cluster_by_name, wait_for_task, find_host_by_cluster_datacenter
from ansible.module_utils.basic import AnsibleModule
class VMwareResourcePool(object):
def __init__(self, module):
self.module = module
self.datacenter = module.params['datacenter']
self.cluster = module.params['cluster']
self.resource_pool = module.params['resource_pool']
self.hostname = module.params['hostname']
self.username = module.params['username']
self.password = module.params['password']
self.state = module.params['state']
self.mem_shares = module.params['mem_shares']
self.mem_limit = module.params['mem_limit']
self.mem_reservation = module.params['mem_reservation']
        self.mem_expandable_reservations = module.params[
            'mem_expandable_reservations']
self.cpu_shares = module.params['cpu_shares']
self.cpu_limit = module.params['cpu_limit']
self.cpu_reservation = module.params['cpu_reservation']
self.cpu_expandable_reservations = module.params[
'cpu_expandable_reservations']
self.dc_obj = None
self.cluster_obj = None
self.host_obj = None
self.resource_pool_obj = None
self.content = connect_to_api(module)
def select_resource_pool(self, host):
pool_obj = None
resource_pools = get_all_objs(self.content, [vim.ResourcePool])
pool_selections = self.get_obj(
[vim.ResourcePool],
self.resource_pool,
return_all=True
)
if pool_selections:
for p in pool_selections:
if p in resource_pools:
pool_obj = p
break
return pool_obj
def get_obj(self, vimtype, name, return_all=False):
obj = list()
container = self.content.viewManager.CreateContainerView(
self.content.rootFolder, vimtype, True)
for c in container.view:
if name in [c.name, c._GetMoId()]:
if return_all is False:
return c
else:
obj.append(c)
if len(obj) > 0:
return obj
else:
# for backwards-compat
return None
def process_state(self):
try:
rp_states = {
'absent': {
'present': self.state_remove_rp,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_exit_unchanged,
'absent': self.state_add_rp,
}
}
rp_states[self.state][self.check_rp_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_remove_rp(self):
changed = True
result = None
resource_pool = self.select_resource_pool(self.host_obj)
try:
task = self.resource_pool_obj.Destroy()
success, result = wait_for_task(task)
except Exception:
self.module.fail_json(msg="Failed to remove resource pool '%s' '%s'" % (
self.resource_pool, resource_pool))
self.module.exit_json(changed=changed, result=str(result))
def state_add_rp(self):
changed = True
rp_spec = vim.ResourceConfigSpec()
cpu_alloc = vim.ResourceAllocationInfo()
cpu_alloc.expandableReservation = self.cpu_expandable_reservations
cpu_alloc.limit = int(self.cpu_limit)
cpu_alloc.reservation = int(self.cpu_reservation)
cpu_alloc_shares = vim.SharesInfo()
cpu_alloc_shares.level = self.cpu_shares
cpu_alloc.shares = cpu_alloc_shares
rp_spec.cpuAllocation = cpu_alloc
mem_alloc = vim.ResourceAllocationInfo()
mem_alloc.limit = int(self.mem_limit)
mem_alloc.expandableReservation = self.mem_expandable_reservations
mem_alloc.reservation = int(self.mem_reservation)
mem_alloc_shares = vim.SharesInfo()
mem_alloc_shares.level = self.mem_shares
mem_alloc.shares = mem_alloc_shares
rp_spec.memoryAllocation = mem_alloc
self.dc_obj = find_datacenter_by_name(self.content, self.datacenter)
if self.dc_obj is None:
self.module.fail_json(msg="Unable to find datacenter with name %s" % self.datacenter)
self.cluster_obj = find_cluster_by_name(self.content, self.cluster, datacenter=self.dc_obj)
if self.cluster_obj is None:
self.module.fail_json(msg="Unable to find cluster with name %s" % self.cluster)
rootResourcePool = self.cluster_obj.resourcePool
rootResourcePool.CreateResourcePool(self.resource_pool, rp_spec)
self.module.exit_json(changed=changed)
def check_rp_state(self):
self.host_obj, self.cluster_obj = find_host_by_cluster_datacenter(self.module, self.content, self.datacenter,
self.cluster, self.hostname)
self.resource_pool_obj = self.select_resource_pool(self.host_obj)
if self.resource_pool_obj is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(datacenter=dict(required=True, type='str'),
cluster=dict(required=True, type='str'),
resource_pool=dict(required=True, type='str'),
mem_shares=dict(type='str', default="normal", choices=[
'high', 'custom', 'normal', 'low']),
mem_limit=dict(type='int', default=-1),
mem_reservation=dict(type='int', default=0),
                              mem_expandable_reservations=dict(
                                  type='bool', default=True),
cpu_shares=dict(type='str', default="normal", choices=[
'high', 'custom', 'normal', 'low']),
cpu_limit=dict(type='int', default=-1),
cpu_reservation=dict(type='int', default=0),
                              cpu_expandable_reservations=dict(
                                  type='bool', default=True),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_rp = VMwareResourcePool(module)
vmware_rp.process_state()
if __name__ == '__main__':
main()
|
|
from logging import getLogger
from numpy import zeros, rollaxis, indices
import traceback
from os.path import join
from collections import defaultdict
from pprint import pformat
import os
NOTEX = (0x1F0, 0x1F0)
import yaml
log = getLogger(__name__)
class Block(object):
"""
Value object representing an (id, data) pair.
Provides elements of its parent material's block arrays.
Blocks will have (name, ID, blockData, aka, color, brightness, opacity, blockTextures)
"""
def __str__(self):
return "<Block {name} ({id}:{data}) hasVariants:{ha}>".format(
name=self.name, id=self.ID, data=self.blockData, ha=self.hasVariants)
def __repr__(self):
return str(self)
def __cmp__(self, other):
if not isinstance(other, Block):
return -1
key = lambda a: a and (a.ID, a.blockData)
return cmp(key(self), key(other))
hasVariants = False # True if blockData defines additional blocktypes
def __init__(self, materials, blockID, blockData=0):
self.materials = materials
self.ID = blockID
self.blockData = blockData
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
if attr == "name":
r = self.materials.names[self.ID]
else:
r = getattr(self.materials, attr)[self.ID]
if attr in ("name", "aka", "color", "type"):
r = r[self.blockData]
return r
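# For example (following the lookup rules above): with b = Block(mats, 35, 14),
# b.name resolves to mats.names[35][14] and b.opacity to mats.opacity[35].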
id_limit = 4096
class MCMaterials(object):
defaultColor = (0xc9, 0x77, 0xf0, 0xff)
defaultBrightness = 0
defaultOpacity = 15
defaultTexture = NOTEX
defaultTex = [t // 16 for t in defaultTexture]
def __init__(self, defaultName="Unused Block"):
object.__init__(self)
self.yamlDatas = []
self.defaultName = defaultName
self.blockTextures = zeros((id_limit, 16, 6, 2), dtype='uint16')
self.blockTextures[:] = self.defaultTexture
self.names = [[defaultName] * 16 for i in range(id_limit)]
self.aka = [[""] * 16 for i in range(id_limit)]
#Sets terrain.png array size
self.type = [["NORMAL"] * 16] * id_limit
self.blocksByType = defaultdict(list)
self.allBlocks = []
self.blocksByID = {}
self.lightEmission = zeros(id_limit, dtype='uint8')
self.lightEmission[:] = self.defaultBrightness
self.lightAbsorption = zeros(id_limit, dtype='uint8')
self.lightAbsorption[:] = self.defaultOpacity
self.flatColors = zeros((id_limit, 16, 4), dtype='uint8')
self.flatColors[:] = self.defaultColor
self.idStr = [""] * id_limit
self.color = self.flatColors
self.brightness = self.lightEmission
self.opacity = self.lightAbsorption
self.Air = self.addBlock(0,
name="Air",
texture=(0x80, 0xB0),
opacity=0,
)
def __repr__(self):
return "<MCMaterials ({0})>".format(self.name)
@property
def AllStairs(self):
return [b for b in self.allBlocks if b.name.endswith("Stairs")]
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __len__(self):
return len(self.allBlocks)
def __iter__(self):
return iter(self.allBlocks)
def __getitem__(self, key):
""" Let's be magic. If we get a string, return the first block whose
name matches exactly. If we get a (id, data) pair or an id, return
that block. for example:
level.materials[0] # returns Air
level.materials["Air"] # also returns Air
level.materials["Powered Rail"] # returns Powered Rail
level.materials["Lapis Lazuli Block"] # in Classic
"""
if isinstance(key, basestring):
for b in self.allBlocks:
if b.name == key:
return b
raise KeyError("No blocks named: " + key)
if isinstance(key, (tuple, list)):
id, blockData = key
return self.blockWithID(id, blockData)
return self.blockWithID(key)
def blocksMatching(self, name):
name = name.lower()
return [v for v in self.allBlocks if name in v.name.lower() or name in v.aka.lower()]
def blockWithID(self, id, data=0):
if (id, data) in self.blocksByID:
return self.blocksByID[id, data]
else:
bl = Block(self, id, blockData=data)
bl.hasVariants = True
return bl
def addYamlBlocksFromFile(self, filename):
try:
import pkg_resources
f = pkg_resources.resource_stream(__name__, filename)
except (ImportError, IOError), e:
print "Cannot get resource_stream for ", filename, e
root = os.environ.get("PYMCLEVEL_YAML_ROOT", "pymclevel") # fall back to cwd as last resort
path = join(root, filename)
log.exception("Failed to read %s using pkg_resources. Trying %s instead." % (filename, path))
f = file(path)
try:
log.info(u"Loading block info from %s", f)
blockyaml = yaml.load(f)
self.addYamlBlocks(blockyaml)
except Exception, e:
log.error(u"Exception while loading block info from %s: %s", f, e)
raise
def addYamlBlocks(self, blockyaml):
self.yamlDatas.append(blockyaml)
for block in blockyaml['blocks']:
try:
self.addYamlBlock(block)
except Exception, e:
log.error(u"Exception while parsing block: %s", e)
log.error(u"Block definition: \n%s", pformat(block))
raise
def addYamlBlock(self, kw):
blockID = kw['id']
# xxx unused_yaml_properties variable unused; needed for
# documentation purpose of some sort? -zothar
#unused_yaml_properties = \
#['explored',
# # 'id',
# # 'idStr',
# # 'mapcolor',
# # 'name',
# # 'tex',
# ### 'tex_data',
# # 'tex_direction',
# ### 'tex_direction_data',
# 'tex_extra',
# # 'type'
# ]
for val, data in kw.get('data', {0: {}}).items():
datakw = dict(kw)
datakw.update(data)
idStr = datakw.get('idStr', "")
tex = [t * 16 for t in datakw.get('tex', self.defaultTex)]
texture = [tex] * 6
texDirs = {
"FORWARD": 5,
"BACKWARD": 4,
"LEFT": 1,
"RIGHT": 0,
"TOP": 2,
"BOTTOM": 3,
}
for dirname, dirtex in datakw.get('tex_direction', {}).items():
if dirname == "SIDES":
for dirname in ("LEFT", "RIGHT"):
texture[texDirs[dirname]] = [t * 16 for t in dirtex]
if dirname in texDirs:
texture[texDirs[dirname]] = [t * 16 for t in dirtex]
datakw['texture'] = texture
# print datakw
block = self.addBlock(blockID, val, **datakw)
block.yaml = datakw
self.idStr[blockID] = idStr
tex_direction_data = kw.get('tex_direction_data')
if tex_direction_data:
texture = datakw['texture']
            # Texture list order: X+ (0), X- (1), Y+ (2), Y- (3), Z+ back (4), Z- front (5)
texDirMap = {
"NORTH": 0,
"EAST": 1,
"SOUTH": 2,
"WEST": 3,
}
def rot90cw():
rot = (5, 0, 2, 3, 4, 1)
texture[:] = [texture[r] for r in rot]
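            # Sketch of rot90cw's effect on the 6-entry texture list (indices only):
            #   before: [f0, f1, f2, f3, f4, f5]
            #   after : [f5, f0, f2, f3, f4, f1]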
for data, dir in tex_direction_data.items():
for _i in range(texDirMap.get(dir, 0)):
rot90cw()
self.blockTextures[blockID][data] = texture
def addBlock(self, blockID, blockData=0, **kw):
name = kw.pop('name', self.names[blockID][blockData])
self.lightEmission[blockID] = kw.pop('brightness', self.defaultBrightness)
self.lightAbsorption[blockID] = kw.pop('opacity', self.defaultOpacity)
self.aka[blockID][blockData] = kw.pop('aka', "")
type = kw.pop('type', 'NORMAL')
color = kw.pop('mapcolor', self.flatColors[blockID, blockData])
self.flatColors[blockID, (blockData or slice(None))] = (tuple(color) + (255,))[:4]
texture = kw.pop('texture', None)
if texture:
self.blockTextures[blockID, (blockData or slice(None))] = texture
        if blockData == 0:
self.names[blockID] = [name] * 16
self.type[blockID] = [type] * 16
else:
self.names[blockID][blockData] = name
self.type[blockID][blockData] = type
block = Block(self, blockID, blockData)
self.allBlocks.append(block)
self.blocksByType[type].append(block)
if (blockID, 0) in self.blocksByID:
self.blocksByID[blockID, 0].hasVariants = True
block.hasVariants = True
self.blocksByID[blockID, blockData] = block
return block
alphaMaterials = MCMaterials(defaultName="Future Block!")
alphaMaterials.name = "Alpha"
alphaMaterials.addYamlBlocksFromFile("minecraft.yaml")
# --- Special treatment for some blocks ---
HugeMushroomTypes = {
"Northwest": 1,
"North": 2,
"Northeast": 3,
"East": 6,
"Southeast": 9,
"South": 8,
"Southwest": 7,
"West": 4,
"Stem": 10,
"Top": 5,
}
from faces import FaceXDecreasing, FaceXIncreasing, FaceYIncreasing, FaceZDecreasing, FaceZIncreasing
Red = (0xD0, 0x70)
Brown = (0xE0, 0x70)
Pore = (0xE0, 0x80)
Stem = (0xD0, 0x80)
def defineShroomFaces(Shroom, id, name):
for way, data in sorted(HugeMushroomTypes.items(), key=lambda a: a[1]):
loway = way.lower()
if way is "Stem":
tex = [Stem, Stem, Pore, Pore, Stem, Stem]
elif way is "Pore":
tex = Pore
else:
tex = [Pore] * 6
tex[FaceYIncreasing] = Shroom
if "north" in loway:
tex[FaceZDecreasing] = Shroom
if "south" in loway:
tex[FaceZIncreasing] = Shroom
if "west" in loway:
tex[FaceXDecreasing] = Shroom
if "east" in loway:
tex[FaceXIncreasing] = Shroom
alphaMaterials.addBlock(id, blockData=data,
name="Huge " + name + " Mushroom (" + way + ")",
texture=tex,
)
defineShroomFaces(Brown, 99, "Brown")
defineShroomFaces(Red, 100, "Red")
classicMaterials = MCMaterials(defaultName="Not present in Classic")
classicMaterials.name = "Classic"
classicMaterials.addYamlBlocksFromFile("classic.yaml")
indevMaterials = MCMaterials(defaultName="Not present in Indev")
indevMaterials.name = "Indev"
indevMaterials.addYamlBlocksFromFile("indev.yaml")
pocketMaterials = MCMaterials()
pocketMaterials.name = "Pocket"
pocketMaterials.addYamlBlocksFromFile("pocket.yaml")
# --- Static block defs ---
alphaMaterials.Stone = alphaMaterials[1, 0]
alphaMaterials.Grass = alphaMaterials[2, 0]
alphaMaterials.Dirt = alphaMaterials[3, 0]
alphaMaterials.Cobblestone = alphaMaterials[4, 0]
alphaMaterials.WoodPlanks = alphaMaterials[5, 0]
alphaMaterials.Sapling = alphaMaterials[6, 0]
alphaMaterials.SpruceSapling = alphaMaterials[6, 1]
alphaMaterials.BirchSapling = alphaMaterials[6, 2]
alphaMaterials.Bedrock = alphaMaterials[7, 0]
alphaMaterials.WaterActive = alphaMaterials[8, 0]
alphaMaterials.Water = alphaMaterials[9, 0]
alphaMaterials.LavaActive = alphaMaterials[10, 0]
alphaMaterials.Lava = alphaMaterials[11, 0]
alphaMaterials.Sand = alphaMaterials[12, 0]
alphaMaterials.Gravel = alphaMaterials[13, 0]
alphaMaterials.GoldOre = alphaMaterials[14, 0]
alphaMaterials.IronOre = alphaMaterials[15, 0]
alphaMaterials.CoalOre = alphaMaterials[16, 0]
alphaMaterials.Wood = alphaMaterials[17, 0]
alphaMaterials.Ironwood = alphaMaterials[17, 1]
alphaMaterials.BirchWood = alphaMaterials[17, 2]
alphaMaterials.Leaves = alphaMaterials[18, 0]
alphaMaterials.PineLeaves = alphaMaterials[18, 1]
alphaMaterials.BirchLeaves = alphaMaterials[18, 2]
alphaMaterials.JungleLeaves = alphaMaterials[18, 3]
alphaMaterials.LeavesPermanent = alphaMaterials[18, 4]
alphaMaterials.PineLeavesPermanent = alphaMaterials[18, 5]
alphaMaterials.BirchLeavesPermanent = alphaMaterials[18, 6]
alphaMaterials.JungleLeavesPermanent = alphaMaterials[18, 7]
alphaMaterials.LeavesDecaying = alphaMaterials[18, 8]
alphaMaterials.PineLeavesDecaying = alphaMaterials[18, 9]
alphaMaterials.BirchLeavesDecaying = alphaMaterials[18, 10]
alphaMaterials.JungleLeavesDecaying = alphaMaterials[18, 11]
alphaMaterials.Sponge = alphaMaterials[19, 0]
alphaMaterials.Glass = alphaMaterials[20, 0]
alphaMaterials.LapisLazuliOre = alphaMaterials[21, 0]
alphaMaterials.LapisLazuliBlock = alphaMaterials[22, 0]
alphaMaterials.Dispenser = alphaMaterials[23, 0]
alphaMaterials.Sandstone = alphaMaterials[24, 0]
alphaMaterials.NoteBlock = alphaMaterials[25, 0]
alphaMaterials.Bed = alphaMaterials[26, 0]
alphaMaterials.PoweredRail = alphaMaterials[27, 0]
alphaMaterials.DetectorRail = alphaMaterials[28, 0]
alphaMaterials.StickyPiston = alphaMaterials[29, 0]
alphaMaterials.Web = alphaMaterials[30, 0]
alphaMaterials.UnusedShrub = alphaMaterials[31, 0]
alphaMaterials.TallGrass = alphaMaterials[31, 1]
alphaMaterials.Shrub = alphaMaterials[31, 2]
alphaMaterials.DesertShrub2 = alphaMaterials[32, 0]
alphaMaterials.Piston = alphaMaterials[33, 0]
alphaMaterials.PistonHead = alphaMaterials[34, 0]
alphaMaterials.WhiteWool = alphaMaterials[35, 0]
alphaMaterials.OrangeWool = alphaMaterials[35, 1]
alphaMaterials.MagentaWool = alphaMaterials[35, 2]
alphaMaterials.LightBlueWool = alphaMaterials[35, 3]
alphaMaterials.YellowWool = alphaMaterials[35, 4]
alphaMaterials.LightGreenWool = alphaMaterials[35, 5]
alphaMaterials.PinkWool = alphaMaterials[35, 6]
alphaMaterials.GrayWool = alphaMaterials[35, 7]
alphaMaterials.LightGrayWool = alphaMaterials[35, 8]
alphaMaterials.CyanWool = alphaMaterials[35, 9]
alphaMaterials.PurpleWool = alphaMaterials[35, 10]
alphaMaterials.BlueWool = alphaMaterials[35, 11]
alphaMaterials.BrownWool = alphaMaterials[35, 12]
alphaMaterials.DarkGreenWool = alphaMaterials[35, 13]
alphaMaterials.RedWool = alphaMaterials[35, 14]
alphaMaterials.BlackWool = alphaMaterials[35, 15]
alphaMaterials.Block36 = alphaMaterials[36, 0]
alphaMaterials.Flower = alphaMaterials[37, 0]
alphaMaterials.Rose = alphaMaterials[38, 0]
alphaMaterials.BrownMushroom = alphaMaterials[39, 0]
alphaMaterials.RedMushroom = alphaMaterials[40, 0]
alphaMaterials.BlockofGold = alphaMaterials[41, 0]
alphaMaterials.BlockofIron = alphaMaterials[42, 0]
alphaMaterials.DoubleStoneSlab = alphaMaterials[43, 0]
alphaMaterials.DoubleSandstoneSlab = alphaMaterials[43, 1]
alphaMaterials.DoubleWoodenSlab = alphaMaterials[43, 2]
alphaMaterials.DoubleCobblestoneSlab = alphaMaterials[43, 3]
alphaMaterials.DoubleBrickSlab = alphaMaterials[43, 4]
alphaMaterials.DoubleStoneBrickSlab = alphaMaterials[43, 5]
alphaMaterials.StoneSlab = alphaMaterials[44, 0]
alphaMaterials.SandstoneSlab = alphaMaterials[44, 1]
alphaMaterials.WoodenSlab = alphaMaterials[44, 2]
alphaMaterials.CobblestoneSlab = alphaMaterials[44, 3]
alphaMaterials.BrickSlab = alphaMaterials[44, 4]
alphaMaterials.StoneBrickSlab = alphaMaterials[44, 5]
alphaMaterials.Brick = alphaMaterials[45, 0]
alphaMaterials.TNT = alphaMaterials[46, 0]
alphaMaterials.Bookshelf = alphaMaterials[47, 0]
alphaMaterials.MossStone = alphaMaterials[48, 0]
alphaMaterials.Obsidian = alphaMaterials[49, 0]
alphaMaterials.Torch = alphaMaterials[50, 0]
alphaMaterials.Fire = alphaMaterials[51, 0]
alphaMaterials.MonsterSpawner = alphaMaterials[52, 0]
alphaMaterials.WoodenStairs = alphaMaterials[53, 0]
alphaMaterials.Chest = alphaMaterials[54, 0]
alphaMaterials.RedstoneWire = alphaMaterials[55, 0]
alphaMaterials.DiamondOre = alphaMaterials[56, 0]
alphaMaterials.BlockofDiamond = alphaMaterials[57, 0]
alphaMaterials.CraftingTable = alphaMaterials[58, 0]
alphaMaterials.Crops = alphaMaterials[59, 0]
alphaMaterials.Farmland = alphaMaterials[60, 0]
alphaMaterials.Furnace = alphaMaterials[61, 0]
alphaMaterials.LitFurnace = alphaMaterials[62, 0]
alphaMaterials.Sign = alphaMaterials[63, 0]
alphaMaterials.WoodenDoor = alphaMaterials[64, 0]
alphaMaterials.Ladder = alphaMaterials[65, 0]
alphaMaterials.Rail = alphaMaterials[66, 0]
alphaMaterials.StoneStairs = alphaMaterials[67, 0]
alphaMaterials.WallSign = alphaMaterials[68, 0]
alphaMaterials.Lever = alphaMaterials[69, 0]
alphaMaterials.StoneFloorPlate = alphaMaterials[70, 0]
alphaMaterials.IronDoor = alphaMaterials[71, 0]
alphaMaterials.WoodFloorPlate = alphaMaterials[72, 0]
alphaMaterials.RedstoneOre = alphaMaterials[73, 0]
alphaMaterials.RedstoneOreGlowing = alphaMaterials[74, 0]
alphaMaterials.RedstoneTorchOff = alphaMaterials[75, 0]
alphaMaterials.RedstoneTorchOn = alphaMaterials[76, 0]
alphaMaterials.Button = alphaMaterials[77, 0]
alphaMaterials.SnowLayer = alphaMaterials[78, 0]
alphaMaterials.Ice = alphaMaterials[79, 0]
alphaMaterials.Snow = alphaMaterials[80, 0]
alphaMaterials.Cactus = alphaMaterials[81, 0]
alphaMaterials.Clay = alphaMaterials[82, 0]
alphaMaterials.SugarCane = alphaMaterials[83, 0]
alphaMaterials.Jukebox = alphaMaterials[84, 0]
alphaMaterials.Fence = alphaMaterials[85, 0]
alphaMaterials.Pumpkin = alphaMaterials[86, 0]
alphaMaterials.Netherrack = alphaMaterials[87, 0]
alphaMaterials.SoulSand = alphaMaterials[88, 0]
alphaMaterials.Glowstone = alphaMaterials[89, 0]
alphaMaterials.NetherPortal = alphaMaterials[90, 0]
alphaMaterials.JackOLantern = alphaMaterials[91, 0]
alphaMaterials.Cake = alphaMaterials[92, 0]
alphaMaterials.RedstoneRepeaterOff = alphaMaterials[93, 0]
alphaMaterials.RedstoneRepeaterOn = alphaMaterials[94, 0]
alphaMaterials.AprilFoolsChest = alphaMaterials[95, 0]
alphaMaterials.Trapdoor = alphaMaterials[96, 0]
alphaMaterials.HiddenSilverfishStone = alphaMaterials[97, 0]
alphaMaterials.HiddenSilverfishCobblestone = alphaMaterials[97, 1]
alphaMaterials.HiddenSilverfishStoneBrick = alphaMaterials[97, 2]
alphaMaterials.StoneBricks = alphaMaterials[98, 0]
alphaMaterials.MossyStoneBricks = alphaMaterials[98, 1]
alphaMaterials.CrackedStoneBricks = alphaMaterials[98, 2]
alphaMaterials.HugeBrownMushroom = alphaMaterials[99, 0]
alphaMaterials.HugeRedMushroom = alphaMaterials[100, 0]
alphaMaterials.IronBars = alphaMaterials[101, 0]
alphaMaterials.GlassPane = alphaMaterials[102, 0]
alphaMaterials.Watermelon = alphaMaterials[103, 0]
alphaMaterials.PumpkinStem = alphaMaterials[104, 0]
alphaMaterials.MelonStem = alphaMaterials[105, 0]
alphaMaterials.Vines = alphaMaterials[106, 0]
alphaMaterials.FenceGate = alphaMaterials[107, 0]
alphaMaterials.BrickStairs = alphaMaterials[108, 0]
alphaMaterials.StoneBrickStairs = alphaMaterials[109, 0]
alphaMaterials.Mycelium = alphaMaterials[110, 0]
alphaMaterials.Lilypad = alphaMaterials[111, 0]
alphaMaterials.NetherBrick = alphaMaterials[112, 0]
alphaMaterials.NetherBrickFence = alphaMaterials[113, 0]
alphaMaterials.NetherBrickStairs = alphaMaterials[114, 0]
alphaMaterials.NetherWart = alphaMaterials[115, 0]
alphaMaterials.EnchantmentTable = alphaMaterials[116,0]
alphaMaterials.BrewingStand = alphaMaterials[117,0]
alphaMaterials.Cauldron = alphaMaterials[118,0]
alphaMaterials.EnderPortal = alphaMaterials[119,0]
alphaMaterials.PortalFrame = alphaMaterials[120,0]
alphaMaterials.EndStone = alphaMaterials[121,0]
alphaMaterials.DragonEgg = alphaMaterials[122,0]
alphaMaterials.RedstoneLampoff = alphaMaterials[123,0]
alphaMaterials.RedstoneLampon = alphaMaterials[124,0]
alphaMaterials.OakWoodDoubleSlab = alphaMaterials[125,0]
alphaMaterials.SpruceWoodDoubleSlab = alphaMaterials[125,1]
alphaMaterials.BirchWoodDoubleSlab = alphaMaterials[125,2]
alphaMaterials.JungleWoodDoubleSlab = alphaMaterials[125,3]
alphaMaterials.OakWoodSlab = alphaMaterials[126,0]
alphaMaterials.SpruceWoodSlab = alphaMaterials[126,1]
alphaMaterials.BirchWoodSlab = alphaMaterials[126,2]
alphaMaterials.JungleWoodSlab = alphaMaterials[126,3]
alphaMaterials.CocoaPlant = alphaMaterials[127,0]
alphaMaterials.SandstoneStairs = alphaMaterials[128,0]
alphaMaterials.EmeraldOre = alphaMaterials[129,0]
alphaMaterials.EnderChest = alphaMaterials[130,0]
alphaMaterials.TripwireHook = alphaMaterials[131,0]
alphaMaterials.Tripwire = alphaMaterials[132,0]
alphaMaterials.BlockofEmerald = alphaMaterials[133,0]
alphaMaterials.SpruceWoodStairs = alphaMaterials[134,0]
alphaMaterials.BirchWoodStairs = alphaMaterials[135,0]
alphaMaterials.JungleWoodStairs = alphaMaterials[136,0]
alphaMaterials.CommandBlock = alphaMaterials[137,0]
alphaMaterials.BeaconBlock = alphaMaterials[138,0]
alphaMaterials.CobblestoneWall = alphaMaterials[139,0]
alphaMaterials.MossyCobblestoneWall = alphaMaterials[139,1]
alphaMaterials.FlowerPot = alphaMaterials[140,0]
alphaMaterials.Carrots = alphaMaterials[141,0]
alphaMaterials.Potatoes = alphaMaterials[142,0]
alphaMaterials.WoodenButton = alphaMaterials[143,0]
alphaMaterials.MobHead = alphaMaterials[144,0]
alphaMaterials.Anvil = alphaMaterials[145,0]
alphaMaterials.TrappedChest = alphaMaterials[146,0]
alphaMaterials.WeightedPressurePlateLight = alphaMaterials[147,0]
alphaMaterials.WeightedPressurePlateHeavy = alphaMaterials[148,0]
alphaMaterials.RedstoneComparatorInactive = alphaMaterials[149,0]
alphaMaterials.RedstoneComparatorActive = alphaMaterials[150,0]
alphaMaterials.DaylightSensor = alphaMaterials[151,0]
alphaMaterials.BlockofRedstone = alphaMaterials[152,0]
alphaMaterials.NetherQuartzOre = alphaMaterials[153,0]
alphaMaterials.Hopper = alphaMaterials[154,0]
alphaMaterials.BlockofQuartz = alphaMaterials[155,0]
alphaMaterials.QuartzStairs = alphaMaterials[156,0]
alphaMaterials.ActivatorRail = alphaMaterials[157,0]
alphaMaterials.Dropper = alphaMaterials[158,0]
# --- Classic static block defs ---
classicMaterials.Stone = classicMaterials[1]
classicMaterials.Grass = classicMaterials[2]
classicMaterials.Dirt = classicMaterials[3]
classicMaterials.Cobblestone = classicMaterials[4]
classicMaterials.WoodPlanks = classicMaterials[5]
classicMaterials.Sapling = classicMaterials[6]
classicMaterials.Bedrock = classicMaterials[7]
classicMaterials.WaterActive = classicMaterials[8]
classicMaterials.Water = classicMaterials[9]
classicMaterials.LavaActive = classicMaterials[10]
classicMaterials.Lava = classicMaterials[11]
classicMaterials.Sand = classicMaterials[12]
classicMaterials.Gravel = classicMaterials[13]
classicMaterials.GoldOre = classicMaterials[14]
classicMaterials.IronOre = classicMaterials[15]
classicMaterials.CoalOre = classicMaterials[16]
classicMaterials.Wood = classicMaterials[17]
classicMaterials.Leaves = classicMaterials[18]
classicMaterials.Sponge = classicMaterials[19]
classicMaterials.Glass = classicMaterials[20]
classicMaterials.RedWool = classicMaterials[21]
classicMaterials.OrangeWool = classicMaterials[22]
classicMaterials.YellowWool = classicMaterials[23]
classicMaterials.LimeWool = classicMaterials[24]
classicMaterials.GreenWool = classicMaterials[25]
classicMaterials.AquaWool = classicMaterials[26]
classicMaterials.CyanWool = classicMaterials[27]
classicMaterials.BlueWool = classicMaterials[28]
classicMaterials.PurpleWool = classicMaterials[29]
classicMaterials.IndigoWool = classicMaterials[30]
classicMaterials.VioletWool = classicMaterials[31]
classicMaterials.MagentaWool = classicMaterials[32]
classicMaterials.PinkWool = classicMaterials[33]
classicMaterials.BlackWool = classicMaterials[34]
classicMaterials.GrayWool = classicMaterials[35]
classicMaterials.WhiteWool = classicMaterials[36]
classicMaterials.Flower = classicMaterials[37]
classicMaterials.Rose = classicMaterials[38]
classicMaterials.BrownMushroom = classicMaterials[39]
classicMaterials.RedMushroom = classicMaterials[40]
classicMaterials.BlockofGold = classicMaterials[41]
classicMaterials.BlockofIron = classicMaterials[42]
classicMaterials.DoubleStoneSlab = classicMaterials[43]
classicMaterials.StoneSlab = classicMaterials[44]
classicMaterials.Brick = classicMaterials[45]
classicMaterials.TNT = classicMaterials[46]
classicMaterials.Bookshelf = classicMaterials[47]
classicMaterials.MossStone = classicMaterials[48]
classicMaterials.Obsidian = classicMaterials[49]
# --- Indev static block defs ---
indevMaterials.Stone = indevMaterials[1]
indevMaterials.Grass = indevMaterials[2]
indevMaterials.Dirt = indevMaterials[3]
indevMaterials.Cobblestone = indevMaterials[4]
indevMaterials.WoodPlanks = indevMaterials[5]
indevMaterials.Sapling = indevMaterials[6]
indevMaterials.Bedrock = indevMaterials[7]
indevMaterials.WaterActive = indevMaterials[8]
indevMaterials.Water = indevMaterials[9]
indevMaterials.LavaActive = indevMaterials[10]
indevMaterials.Lava = indevMaterials[11]
indevMaterials.Sand = indevMaterials[12]
indevMaterials.Gravel = indevMaterials[13]
indevMaterials.GoldOre = indevMaterials[14]
indevMaterials.IronOre = indevMaterials[15]
indevMaterials.CoalOre = indevMaterials[16]
indevMaterials.Wood = indevMaterials[17]
indevMaterials.Leaves = indevMaterials[18]
indevMaterials.Sponge = indevMaterials[19]
indevMaterials.Glass = indevMaterials[20]
indevMaterials.RedWool = indevMaterials[21]
indevMaterials.OrangeWool = indevMaterials[22]
indevMaterials.YellowWool = indevMaterials[23]
indevMaterials.LimeWool = indevMaterials[24]
indevMaterials.GreenWool = indevMaterials[25]
indevMaterials.AquaWool = indevMaterials[26]
indevMaterials.CyanWool = indevMaterials[27]
indevMaterials.BlueWool = indevMaterials[28]
indevMaterials.PurpleWool = indevMaterials[29]
indevMaterials.IndigoWool = indevMaterials[30]
indevMaterials.VioletWool = indevMaterials[31]
indevMaterials.MagentaWool = indevMaterials[32]
indevMaterials.PinkWool = indevMaterials[33]
indevMaterials.BlackWool = indevMaterials[34]
indevMaterials.GrayWool = indevMaterials[35]
indevMaterials.WhiteWool = indevMaterials[36]
indevMaterials.Flower = indevMaterials[37]
indevMaterials.Rose = indevMaterials[38]
indevMaterials.BrownMushroom = indevMaterials[39]
indevMaterials.RedMushroom = indevMaterials[40]
indevMaterials.BlockofGold = indevMaterials[41]
indevMaterials.BlockofIron = indevMaterials[42]
indevMaterials.DoubleStoneSlab = indevMaterials[43]
indevMaterials.StoneSlab = indevMaterials[44]
indevMaterials.Brick = indevMaterials[45]
indevMaterials.TNT = indevMaterials[46]
indevMaterials.Bookshelf = indevMaterials[47]
indevMaterials.MossStone = indevMaterials[48]
indevMaterials.Obsidian = indevMaterials[49]
indevMaterials.Torch = indevMaterials[50, 0]
indevMaterials.Fire = indevMaterials[51, 0]
indevMaterials.InfiniteWater = indevMaterials[52, 0]
indevMaterials.InfiniteLava = indevMaterials[53, 0]
indevMaterials.Chest = indevMaterials[54, 0]
indevMaterials.Cog = indevMaterials[55, 0]
indevMaterials.DiamondOre = indevMaterials[56, 0]
indevMaterials.BlockofDiamond = indevMaterials[57, 0]
indevMaterials.CraftingTable = indevMaterials[58, 0]
indevMaterials.Crops = indevMaterials[59, 0]
indevMaterials.Farmland = indevMaterials[60, 0]
indevMaterials.Furnace = indevMaterials[61, 0]
indevMaterials.LitFurnace = indevMaterials[62, 0]
# --- Pocket static block defs ---
pocketMaterials.Air = pocketMaterials[0, 0]
pocketMaterials.Stone = pocketMaterials[1, 0]
pocketMaterials.Grass = pocketMaterials[2, 0]
pocketMaterials.Dirt = pocketMaterials[3, 0]
pocketMaterials.Cobblestone = pocketMaterials[4, 0]
pocketMaterials.WoodPlanks = pocketMaterials[5, 0]
pocketMaterials.Sapling = pocketMaterials[6, 0]
pocketMaterials.SpruceSapling = pocketMaterials[6, 1]
pocketMaterials.BirchSapling = pocketMaterials[6, 2]
pocketMaterials.Bedrock = pocketMaterials[7, 0]
pocketMaterials.Wateractive = pocketMaterials[8, 0]
pocketMaterials.Water = pocketMaterials[9, 0]
pocketMaterials.Lavaactive = pocketMaterials[10, 0]
pocketMaterials.Lava = pocketMaterials[11, 0]
pocketMaterials.Sand = pocketMaterials[12, 0]
pocketMaterials.Gravel = pocketMaterials[13, 0]
pocketMaterials.GoldOre = pocketMaterials[14, 0]
pocketMaterials.IronOre = pocketMaterials[15, 0]
pocketMaterials.CoalOre = pocketMaterials[16, 0]
pocketMaterials.Wood = pocketMaterials[17, 0]
pocketMaterials.PineWood = pocketMaterials[17, 1]
pocketMaterials.BirchWood = pocketMaterials[17, 2]
pocketMaterials.Leaves = pocketMaterials[18, 0]
pocketMaterials.Glass = pocketMaterials[20, 0]
pocketMaterials.LapisLazuliOre = pocketMaterials[21, 0]
pocketMaterials.LapisLazuliBlock = pocketMaterials[22, 0]
pocketMaterials.Sandstone = pocketMaterials[24, 0]
pocketMaterials.Bed = pocketMaterials[26, 0]
pocketMaterials.Web = pocketMaterials[30, 0]
pocketMaterials.UnusedShrub = pocketMaterials[31, 0]
pocketMaterials.TallGrass = pocketMaterials[31, 1]
pocketMaterials.Shrub = pocketMaterials[31, 2]
pocketMaterials.WhiteWool = pocketMaterials[35, 0]
pocketMaterials.OrangeWool = pocketMaterials[35, 1]
pocketMaterials.MagentaWool = pocketMaterials[35, 2]
pocketMaterials.LightBlueWool = pocketMaterials[35, 3]
pocketMaterials.YellowWool = pocketMaterials[35, 4]
pocketMaterials.LightGreenWool = pocketMaterials[35, 5]
pocketMaterials.PinkWool = pocketMaterials[35, 6]
pocketMaterials.GrayWool = pocketMaterials[35, 7]
pocketMaterials.LightGrayWool = pocketMaterials[35, 8]
pocketMaterials.CyanWool = pocketMaterials[35, 9]
pocketMaterials.PurpleWool = pocketMaterials[35, 10]
pocketMaterials.BlueWool = pocketMaterials[35, 11]
pocketMaterials.BrownWool = pocketMaterials[35, 12]
pocketMaterials.DarkGreenWool = pocketMaterials[35, 13]
pocketMaterials.RedWool = pocketMaterials[35, 14]
pocketMaterials.BlackWool = pocketMaterials[35, 15]
pocketMaterials.Flower = pocketMaterials[37, 0]
pocketMaterials.Rose = pocketMaterials[38, 0]
pocketMaterials.BrownMushroom = pocketMaterials[39, 0]
pocketMaterials.RedMushroom = pocketMaterials[40, 0]
pocketMaterials.BlockofGold = pocketMaterials[41, 0]
pocketMaterials.BlockofIron = pocketMaterials[42, 0]
pocketMaterials.DoubleStoneSlab = pocketMaterials[43, 0]
pocketMaterials.DoubleSandstoneSlab = pocketMaterials[43, 1]
pocketMaterials.DoubleWoodenSlab = pocketMaterials[43, 2]
pocketMaterials.DoubleCobblestoneSlab = pocketMaterials[43, 3]
pocketMaterials.DoubleBrickSlab = pocketMaterials[43, 4]
pocketMaterials.StoneSlab = pocketMaterials[44, 0]
pocketMaterials.SandstoneSlab = pocketMaterials[44, 1]
pocketMaterials.WoodenSlab = pocketMaterials[44, 2]
pocketMaterials.CobblestoneSlab = pocketMaterials[44, 3]
pocketMaterials.BrickSlab = pocketMaterials[44, 4]
pocketMaterials.Brick = pocketMaterials[45, 0]
pocketMaterials.TNT = pocketMaterials[46, 0]
pocketMaterials.Bookshelf = pocketMaterials[47, 0]
pocketMaterials.MossStone = pocketMaterials[48, 0]
pocketMaterials.Obsidian = pocketMaterials[49, 0]
pocketMaterials.Torch = pocketMaterials[50, 0]
pocketMaterials.Fire = pocketMaterials[51, 0]
pocketMaterials.WoodenStairs = pocketMaterials[53, 0]
pocketMaterials.Chest = pocketMaterials[54, 0]
pocketMaterials.DiamondOre = pocketMaterials[56, 0]
pocketMaterials.BlockofDiamond = pocketMaterials[57, 0]
pocketMaterials.CraftingTable = pocketMaterials[58, 0]
pocketMaterials.Crops = pocketMaterials[59, 0]
pocketMaterials.Farmland = pocketMaterials[60, 0]
pocketMaterials.Furnace = pocketMaterials[61, 0]
pocketMaterials.LitFurnace = pocketMaterials[62, 0]
pocketMaterials.WoodenDoor = pocketMaterials[64, 0]
pocketMaterials.Ladder = pocketMaterials[65, 0]
pocketMaterials.StoneStairs = pocketMaterials[67, 0]
pocketMaterials.IronDoor = pocketMaterials[71, 0]
pocketMaterials.RedstoneOre = pocketMaterials[73, 0]
pocketMaterials.RedstoneOreGlowing = pocketMaterials[74, 0]
pocketMaterials.SnowLayer = pocketMaterials[78, 0]
pocketMaterials.Ice = pocketMaterials[79, 0]
pocketMaterials.Snow = pocketMaterials[80, 0]
pocketMaterials.Cactus = pocketMaterials[81, 0]
pocketMaterials.Clay = pocketMaterials[82, 0]
pocketMaterials.SugarCane = pocketMaterials[83, 0]
pocketMaterials.Fence = pocketMaterials[85, 0]
pocketMaterials.Glowstone = pocketMaterials[89, 0]
pocketMaterials.InvisibleBedrock = pocketMaterials[95, 0]
pocketMaterials.Trapdoor = pocketMaterials[96, 0]
pocketMaterials.StoneBricks = pocketMaterials[98, 0]
pocketMaterials.GlassPane = pocketMaterials[102, 0]
pocketMaterials.Watermelon = pocketMaterials[103, 0]
pocketMaterials.MelonStem = pocketMaterials[105, 0]
pocketMaterials.FenceGate = pocketMaterials[107, 0]
pocketMaterials.BrickStairs = pocketMaterials[108, 0]
pocketMaterials.GlowingObsidian = pocketMaterials[246, 0]
pocketMaterials.NetherReactor = pocketMaterials[247, 0]
pocketMaterials.NetherReactorUsed = pocketMaterials[247, 1]
def printStaticDefs(name):
# printStaticDefs('alphaMaterials')
mats = eval(name)
for b in sorted(mats.allBlocks):
print "{name}.{0} = {name}[{1},{2}]".format(
b.name.replace(" ", "").replace("(","").replace(")",""),
b.ID, b.blockData,
name=name,
)
_indices = rollaxis(indices((id_limit, 16)), 0, 3)
def _filterTable(filters, unavailable, default=(0, 0)):
    # a filter table is an (id_limit, 16) table of (ID, data) pairs.
table = zeros((id_limit, 16, 2), dtype='uint8')
table[:] = _indices
for u in unavailable:
try:
if u[1] == 0:
u = u[0]
except TypeError:
pass
table[u] = default
for f, t in filters:
try:
if f[1] == 0:
f = f[0]
except TypeError:
pass
table[f] = t
return table
nullConversion = lambda b, d: (b, d)
def filterConversion(table):
def convert(blocks, data):
if data is None:
data = 0
t = table[blocks, data]
return t[..., 0], t[..., 1]
return convert
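# Illustrative sketch (not part of the original module): how a filter table
# drives conversion via numpy fancy indexing. The table below is hypothetical;
# only the (id_limit, 16, 2) layout built by _filterTable is assumed.
def _example_filter_conversion():
    import numpy
    id_limit = 4
    table = numpy.rollaxis(numpy.indices((id_limit, 16)), 0, 3).copy()
    table[2] = (0, 0)  # pretend block ID 2 is unavailable: map it to Air
    convert = filterConversion(table)
    blocks = numpy.array([1, 2, 3])
    data = numpy.array([0, 5, 1])
    new_blocks, new_data = convert(blocks, data)
    assert list(new_blocks) == [1, 0, 3]  # ID 2 was rewritten to the default
    assert list(new_data) == [0, 0, 1]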
def guessFilterTable(matsFrom, matsTo):
""" Returns a pair (filters, unavailable)
filters is a list of (from, to) pairs; from and to are (ID, data) pairs
unavailable is a list of (ID, data) pairs in matsFrom not found in matsTo.
Searches the 'name' and 'aka' fields to find matches.
"""
filters = []
unavailable = []
toByName = dict(((b.name, b) for b in sorted(matsTo.allBlocks, reverse=True)))
for fromBlock in matsFrom.allBlocks:
block = toByName.get(fromBlock.name)
if block is None:
for b in matsTo.allBlocks:
if b.name.startswith(fromBlock.name):
block = b
break
if block is None:
for b in matsTo.allBlocks:
if fromBlock.name in b.name:
block = b
break
if block is None:
for b in matsTo.allBlocks:
if fromBlock.name in b.aka:
block = b
break
if block is None:
if "Indigo Wool" == fromBlock.name:
block = toByName.get("Purple Wool")
elif "Violet Wool" == fromBlock.name:
block = toByName.get("Purple Wool")
if block:
if block != fromBlock:
filters.append(((fromBlock.ID, fromBlock.blockData), (block.ID, block.blockData)))
else:
unavailable.append((fromBlock.ID, fromBlock.blockData))
return filters, unavailable
allMaterials = (alphaMaterials, classicMaterials, pocketMaterials, indevMaterials)
_conversionFuncs = {}
def conversionFunc(destMats, sourceMats):
if destMats is sourceMats:
return nullConversion
func = _conversionFuncs.get((destMats, sourceMats))
if func:
return func
filters, unavailable = guessFilterTable(sourceMats, destMats)
log.debug("")
log.debug("%s %s %s", sourceMats.name, "=>", destMats.name)
for a, b in [(sourceMats.blockWithID(*a), destMats.blockWithID(*b)) for a, b in filters]:
log.debug("{0:20}: \"{1}\"".format('"' + a.name + '"', b.name))
log.debug("")
log.debug("Missing blocks: %s", [sourceMats.blockWithID(*a).name for a in unavailable])
table = _filterTable(filters, unavailable, (35, 0))
func = filterConversion(table)
_conversionFuncs[(destMats, sourceMats)] = func
return func
def convertBlocks(destMats, sourceMats, blocks, blockData):
if sourceMats == destMats:
return blocks, blockData
return conversionFunc(destMats, sourceMats)(blocks, blockData)
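# Hedged usage sketch (illustrative): converting a chunk's block arrays
# between materials sets. Conversion functions are built lazily and cached
# per (dest, source) pair; blocks without a counterpart fall back to
# (35, 0), i.e. White Wool:
#
#   blocks, data = convertBlocks(classicMaterials, alphaMaterials,
#                                chunk_blocks, chunk_data)
#
# chunk_blocks and chunk_data are hypothetical uint8 arrays here.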
namedMaterials = dict((i.name, i) for i in allMaterials)
__all__ = "indevMaterials, pocketMaterials, alphaMaterials, classicMaterials, namedMaterials, MCMaterials".split(", ")
|
|
from models.retinanet.builder import RetinaNet as Detector
from models.NASFPN.builder import ResNetV1bFPN as Backbone
from models.sepc.builder import RetinaNetNeckWithBNWithSEPC as Neck
from models.sepc.builder import RetinaNetHeadWithBNWithSEPC as RpnHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 100
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 4 if is_train else 1
fp16 = True
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="syncbn", ndev=len(KvstoreParam.gpus), wd_mult=1.0, lr_mult=1.0, eps=1e-4, mom=0.997)
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
depth = 50
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class SEPCParam:
out_channels = 256
pconv_num = 4
start_level = 1
pconv_deform = False
ibn = True
normalizer = NormalizeParam.normalizer
lcconv_deform = False
        pad_sizes = (800, 1333)
stride = (8, 16, 32, 64, 128)
class RpnParam:
num_class = 1 + 80
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
nb_conv = 0
class anchor_generate:
scale = (4 * 2 ** 0, 4 * 2 ** (1.0 / 3.0), 4 * 2 ** (2.0 / 3.0))
ratio = (0.5, 1.0, 2.0)
stride = (8, 16, 32, 64, 128)
image_anchor = None
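            # 3 scales per octave x 3 aspect ratios = 9 anchors per
            # feature map location, the standard RetinaNet layout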
class head:
conv_channel = 256
mean = None
std = None
assert conv_channel == SEPCParam.out_channels
class proposal:
pre_nms_top_n = 1000
post_nms_top_n = None
nms_thr = None
min_bbox_side = None
min_det_score = 0.05 # filter score in network
class focal_loss:
alpha = 0.25
gamma = 2.0
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = None
image_roi = None
batch_image = None
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = None
stride = None
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam, SEPCParam)
rpn_head = RpnHead(RpnParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, rpn_head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = None
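            # linear LR scaling: the base rate (0.005 per 8 images) is
            # multiplied by the effective batch size (#GPUs x images/GPU),
            # here 0.005 / 8 * 8 * 4 = 0.02 for a 32-image batch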
class schedule:
begin_epoch = 0
end_epoch = 6
lr_iter = [60000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3
iter = 1000
class TestParam:
min_det_score = 0 # filter appended boxes
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_val2017.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406))
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.short = (100, 50, 25, 13, 7)
self.long = (167, 84, 42, 21, 11)
self.stride = (8, 16, 32, 64, 128)
scales = (4 * 2 ** 0, 4 * 2 ** (1.0 / 3.0), 4 * 2 ** (2.0 / 3.0))
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 9999
pos_thr = 0.5
neg_thr = 0.4
min_pos_thr = 0.0
class sample:
image_anchor = None
pos_fraction = None
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord
from models.retinanet.input import PyramidAnchorTarget2D, Norm2DImage
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
PyramidAnchorTarget2D(AnchorTarget2DParam()),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
from models.retinanet import metric
rpn_acc_metric = metric.FGAccMetric(
"FGAcc",
["cls_loss_output"],
["rpn_cls_label"]
)
metric_list = [rpn_acc_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
|
|
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Contains basic implementations for the interface building blocks"""
from git.db.interface import *
from git.util import (
pool,
join,
isfile,
normpath,
abspath,
dirname,
LazyMixin,
hex_to_bin,
bin_to_hex,
expandvars,
expanduser,
exists,
is_git_dir,
)
from git.index import IndexFile
from git.config import GitConfigParser
from git.exc import (
BadObject,
AmbiguousObjectName,
InvalidGitRepositoryError,
NoSuchPathError
)
from async import ChannelThreadTask
from itertools import chain
import sys
import os
__all__ = ( 'PureObjectDBR', 'PureObjectDBW', 'PureRootPathDB', 'PureCompoundDB',
'PureConfigurationMixin', 'PureRepositoryPathsMixin', 'PureAlternatesFileMixin',
'PureIndexDB')
class PureObjectDBR(ObjectDBR):
#{ Query Interface
def has_object_async(self, reader):
task = ChannelThreadTask(reader, str(self.has_object_async), lambda sha: (sha, self.has_object(sha)))
return pool.add_task(task)
def info_async(self, reader):
task = ChannelThreadTask(reader, str(self.info_async), self.info)
return pool.add_task(task)
def stream_async(self, reader):
# base implementation just uses the stream method repeatedly
task = ChannelThreadTask(reader, str(self.stream_async), self.stream)
return pool.add_task(task)
def partial_to_complete_sha_hex(self, partial_hexsha):
len_partial_hexsha = len(partial_hexsha)
if len_partial_hexsha % 2 != 0:
partial_binsha = hex_to_bin(partial_hexsha + "0")
else:
partial_binsha = hex_to_bin(partial_hexsha)
# END assure successful binary conversion
return self.partial_to_complete_sha(partial_binsha, len(partial_hexsha))
#} END query interface
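# Note on partial_to_complete_sha_hex above: an odd-length hex prefix such
# as '1e4' cannot be converted to binary directly, so it is padded to '1e40'
# first; the original prefix length is passed along so that only the first
# three hex digits actually take part in the match.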
class PureObjectDBW(ObjectDBW):
def __init__(self, *args, **kwargs):
try:
super(PureObjectDBW, self).__init__(*args, **kwargs)
except TypeError:
pass
#END handle py 2.6
self._ostream = None
#{ Edit Interface
def set_ostream(self, stream):
cstream = self._ostream
self._ostream = stream
return cstream
def ostream(self):
return self._ostream
def store_async(self, reader):
task = ChannelThreadTask(reader, str(self.store_async), self.store)
return pool.add_task(task)
#} END edit interface
class PureRootPathDB(RootPathDB):
def __init__(self, root_path):
self._root_path = root_path
super(PureRootPathDB, self).__init__(root_path)
#{ Interface
def root_path(self):
return self._root_path
def db_path(self, rela_path=None):
if not rela_path:
return self._root_path
return join(self._root_path, rela_path)
#} END interface
def _databases_recursive(database, output):
"""Fill output list with database from db, in order. Deals with Loose, Packed
and compound databases."""
if isinstance(database, CompoundDB):
compounds = list()
dbs = database.databases()
output.extend(db for db in dbs if not isinstance(db, CompoundDB))
for cdb in (db for db in dbs if isinstance(db, CompoundDB)):
_databases_recursive(cdb, output)
else:
output.append(database)
# END handle database type
class PureCompoundDB(CompoundDB, PureObjectDBR, LazyMixin, CachingDB):
def _set_cache_(self, attr):
if attr == '_dbs':
self._dbs = list()
else:
super(PureCompoundDB, self)._set_cache_(attr)
#{ PureObjectDBR interface
def has_object(self, sha):
for db in self._dbs:
if db.has_object(sha):
return True
#END for each db
return False
    def info(self, sha):
        for db in self._dbs:
            try:
                return db.info(sha)
            except BadObject:
                pass
        #END for each db
        raise BadObject(sha)
    def stream(self, sha):
        for db in self._dbs:
            try:
                return db.stream(sha)
            except BadObject:
                pass
        #END for each db
        raise BadObject(sha)
def size(self):
        return sum(db.size() for db in self._dbs)
def sha_iter(self):
return chain(*(db.sha_iter() for db in self._dbs))
#} END object DBR Interface
#{ Interface
def databases(self):
return tuple(self._dbs)
def update_cache(self, force=False):
# something might have changed, clear everything
stat = False
for db in self._dbs:
if isinstance(db, CachingDB):
stat |= db.update_cache(force)
# END if is caching db
# END for each database to update
return stat
def partial_to_complete_sha_hex(self, partial_hexsha):
len_partial_hexsha = len(partial_hexsha)
if len_partial_hexsha % 2 != 0:
partial_binsha = hex_to_bin(partial_hexsha + "0")
else:
partial_binsha = hex_to_bin(partial_hexsha)
# END assure successful binary conversion
candidate = None
for db in self._dbs:
full_bin_sha = None
try:
if hasattr(db, 'partial_to_complete_sha_hex'):
full_bin_sha = db.partial_to_complete_sha_hex(partial_hexsha)
else:
full_bin_sha = db.partial_to_complete_sha(partial_binsha, len_partial_hexsha)
# END handle database type
except BadObject:
continue
# END ignore bad objects
if full_bin_sha:
if candidate and candidate != full_bin_sha:
raise AmbiguousObjectName(partial_hexsha)
candidate = full_bin_sha
# END handle candidate
# END for each db
if not candidate:
raise BadObject(partial_binsha)
return candidate
def partial_to_complete_sha(self, partial_binsha, hex_len):
"""Simple adaptor to feed into our implementation"""
return self.partial_to_complete_sha_hex(bin_to_hex(partial_binsha)[:hex_len])
#} END interface
class PureRepositoryPathsMixin(RepositoryPathsMixin):
    # slots has no effect here, it's just to keep track of used attrs
__slots__ = ("_git_path", '_bare', '_working_tree_dir')
#{ Configuration
repo_dir = '.git'
objs_dir = 'objects'
#} END configuration
#{ Subclass Interface
def _initialize(self, path):
epath = abspath(expandvars(expanduser(path or os.getcwd())))
if not exists(epath):
raise NoSuchPathError(epath)
#END check file
self._working_tree_dir = None
self._git_path = None
curpath = epath
# walk up the path to find the .git dir
while curpath:
if is_git_dir(curpath):
self._git_path = curpath
self._working_tree_dir = os.path.dirname(curpath)
break
gitpath = join(curpath, self.repo_dir)
if is_git_dir(gitpath):
self._git_path = gitpath
self._working_tree_dir = curpath
break
curpath, dummy = os.path.split(curpath)
if not dummy:
break
# END while curpath
if self._git_path is None:
raise InvalidGitRepositoryError(epath)
# END path not found
self._bare = self._working_tree_dir is None
if hasattr(self, 'config_reader'):
try:
self._bare = self.config_reader("repository").getboolean('core','bare')
except Exception:
                # let's not assume the option exists, although it should
pass
#END handle exception
#END check bare flag
self._working_tree_dir = self._bare and None or self._working_tree_dir
#} end subclass interface
#{ Object Interface
def __eq__(self, rhs):
if hasattr(rhs, 'git_dir'):
return self.git_dir == rhs.git_dir
return False
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
return hash(self.git_dir)
def __repr__(self):
return "%s(%r)" % (type(self).__name__, self.git_dir)
#} END object interface
#{ Interface
@property
def is_bare(self):
return self._bare
@property
def git_dir(self):
return self._git_path
@property
def working_tree_dir(self):
if self._working_tree_dir is None:
raise AssertionError("Repository at %s is bare and does not have a working tree directory" % self.git_dir)
#END assertion
return dirname(self.git_dir)
@property
def objects_dir(self):
return join(self.git_dir, self.objs_dir)
@property
def working_dir(self):
if self.is_bare:
return self.git_dir
else:
return self.working_tree_dir
#END handle bare state
def _mk_description():
def _get_description(self):
filename = join(self.git_dir, 'description')
return file(filename).read().rstrip()
def _set_description(self, descr):
filename = join(self.git_dir, 'description')
file(filename, 'w').write(descr+'\n')
return property(_get_description, _set_description, "Descriptive text for the content of the repository")
description = _mk_description()
del(_mk_description)
#} END interface
class PureConfigurationMixin(ConfigurationMixin):
#{ Configuration
system_config_file_name = "gitconfig"
repo_config_file_name = "config"
#} END
def __new__(cls, *args, **kwargs):
"""This is just a stupid workaround for the evil py2.6 change which makes mixins quite impossible"""
return super(PureConfigurationMixin, cls).__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
"""Verify prereqs"""
try:
super(PureConfigurationMixin, self).__init__(*args, **kwargs)
except TypeError:
pass
#END handle code-breaking change in python 2.6
assert hasattr(self, 'git_dir')
def _path_at_level(self, level ):
        # we do not support an absolute path to the gitconfig on windows,
        # use the global config instead
if sys.platform == "win32" and level == "system":
level = "global"
#END handle windows
if level == "system":
return "/etc/%s" % self.system_config_file_name
elif level == "global":
return normpath(expanduser("~/.%s" % self.system_config_file_name))
elif level == "repository":
return join(self.git_dir, self.repo_config_file_name)
#END handle level
raise ValueError("Invalid configuration level: %r" % level)
#{ Interface
def config_reader(self, config_level=None):
files = None
if config_level is None:
files = [ self._path_at_level(f) for f in self.config_level ]
else:
files = [ self._path_at_level(config_level) ]
#END handle level
return GitConfigParser(files, read_only=True)
def config_writer(self, config_level="repository"):
return GitConfigParser(self._path_at_level(config_level), read_only=False)
#} END interface
class PureIndexDB(IndexDB):
#{ Configuration
IndexCls = IndexFile
#} END configuration
@property
def index(self):
return self.IndexCls(self)
class PureAlternatesFileMixin(object):
"""Utility able to read and write an alternates file through the alternates property
It needs to be part of a type with the git_dir or db_path property.
The file by default is assumed to be located at the default location as imposed
by the standard git repository layout"""
#{ Configuration
alternates_filepath = os.path.join('info', 'alternates') # relative path to alternates file
#} END configuration
def __init__(self, *args, **kwargs):
try:
super(PureAlternatesFileMixin, self).__init__(*args, **kwargs)
except TypeError:
pass
#END handle py2.6 code breaking changes
self._alternates_path() # throws on incompatible type
#{ Interface
def _alternates_path(self):
if hasattr(self, 'git_dir'):
return join(self.git_dir, 'objects', self.alternates_filepath)
elif hasattr(self, 'db_path'):
return self.db_path(self.alternates_filepath)
else:
raise AssertionError("This mixin requires a parent type with either the git_dir property or db_path method")
#END handle path
def _get_alternates(self):
"""The list of alternates for this repo from which objects can be retrieved
:return: list of strings being pathnames of alternates"""
alternates_path = self._alternates_path()
if os.path.exists(alternates_path):
            # open outside the try block so a failed open doesn't reach f.close()
            f = open(alternates_path)
            try:
                alts = f.read()
            finally:
                f.close()
return alts.strip().splitlines()
else:
return list()
# END handle path exists
def _set_alternates(self, alts):
"""Sets the alternates
:parm alts:
is the array of string paths representing the alternates at which
git should look for objects, i.e. /home/user/repo/.git/objects
:raise NoSuchPathError:
:note:
The method does not check for the existance of the paths in alts
as the caller is responsible."""
alternates_path = self._alternates_path()
if not alts:
if isfile(alternates_path):
os.remove(alternates_path)
else:
            # open outside the try block so a failed open doesn't reach f.close()
            f = open(alternates_path, 'w')
            try:
                f.write("\n".join(alts))
            finally:
                f.close()
# END file handling
# END alts handling
    alternates = property(_get_alternates, _set_alternates, doc="Retrieve a list of alternates paths or set a list of paths to be used as alternates")
#} END interface
|
|
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
    See ``scipy.stats.f_oneway``, which should give the same results while
    being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
    # scipy removed stats.fprob; special.fdtrc is the F survival function
    prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
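# Illustrative sketch (not part of the original module): f_oneway on two
# tiny single-feature groups, checked against the textbook decomposition.
# The numbers below are made up for the example.
def _example_f_oneway():
    g1 = np.array([[1.], [2.], [3.]])  # mean 2, within-group SS = 2
    g2 = np.array([[2.], [4.], [6.]])  # mean 4, within-group SS = 8
    f, p = f_oneway(g1, g2)
    # grand mean = 3, ssbn = 3*(2-3)**2 + 3*(4-3)**2 = 6, sswn = 10,
    # so F = (6 / 1) / (10 / 4) = 2.4
    assert np.allclose(f, [2.4])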
def f_classif(X, y):
"""Compute the Anova F-value for the provided sample
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared statistic for each class/feature combination.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain booleans or frequencies (e.g., term counts in document
classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = check_array(X.sum(axis=0))
class_prob = check_array(Y.mean(axis=0))
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
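# Illustrative sketch (not part of the original module): chi2 on a tiny
# term-count matrix. Expected counts are the outer product of class
# probabilities and feature totals, so a feature concentrated in one class
# gets a high statistic.
def _example_chi2():
    X = np.array([[4., 1.],
                  [0., 1.],
                  [5., 1.],
                  [1., 1.]])
    y = np.array([1, 0, 1, 0])
    scores, pvals = chi2(X, y)
    # feature 0 co-varies with the class (observed [1, 9] vs expected
    # [5, 5]); feature 1 is spread evenly, so its statistic is 0
    assert scores[0] > scores[1]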
def f_regression(X, y, center=True):
"""Univariate linear regression tests
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. the regressor of interest and the data are orthogonalized
wrt constant regressors
2. the cross correlation between data and regressors is computed
3. it is converted to an F score then to a p-value
Parameters
----------
    X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target vector.
    center : bool, default=True
        If True, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
    X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
# XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
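# Illustrative sketch (not part of the original module): since
# F = corr**2 / (1 - corr**2) * dof, ranking features by F is the same as
# ranking them by squared correlation with y.
def _example_f_regression():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(50, 2))
    y = X[:, 0] + 0.1 * rng.normal(size=50)  # y tracks feature 0
    F, pv = f_regression(X, y)
    assert F[0] > F[1]
    assert pv[0] < pv[1]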
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
self.scores_, self.pvalues_ = self.score_func(X, y)
self.scores_ = np.asarray(self.scores_)
self.pvalues_ = np.asarray(self.pvalues_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
        if self.percentile == 100:
            return np.ones(len(self.scores_), dtype=bool)
        elif self.percentile == 0:
            return np.zeros(len(self.scores_), dtype=bool)
        # Cater for NaNs before computing the percentile cutoff
        scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
        if len(ties):
            max_feats = len(scores) * self.percentile // 100
            kept_ties = ties[:max_feats - mask.sum()]
            mask[kept_ties] = True
return mask
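# Worked example of the tie handling above (illustrative): with 10 features,
# percentile=20 and scores [9, 8, 8, 8, 1, 1, 1, 1, 1, 1], the cutoff lands
# on the tied score 8. max_feats = 10 * 20 // 100 = 2, the strict comparison
# keeps only the 9, and kept_ties then admits exactly one of the tied 8s.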
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
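# Hedged usage sketch (illustrative, made-up data): keep the 2 best of 3
# features under the default ANOVA score.
def _example_select_k_best():
    X = np.array([[1., 5., 1.],
                  [2., 4., 2.],
                  [3., 5., 1.],
                  [4., 4., 2.]])
    y = np.array([0, 0, 1, 1])
    selector = SelectKBest(f_classif, k=2).fit(X, y)
    assert selector.transform(X).shape == (4, 2)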
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
http://en.wikipedia.org/wiki/False_discovery_rate
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
        # Benjamini-Hochberg: the i-th smallest p-value (1-indexed) is
        # compared against alpha * i / n_features
        selected = sv[sv <= float(self.alpha) / n_features
                      * np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
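# Worked Benjamini-Hochberg example (illustrative): with p-values
# [0.01, 0.02, 0.3, 0.5] and alpha=0.2, the sorted p-values are compared
# against alpha * i / m = [0.05, 0.10, 0.15, 0.20]. The largest p-value
# under its threshold is 0.02, so exactly the first two features are kept.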
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
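# Hedged usage sketch (illustrative): the mode/param pair is forwarded to
# the matching selector, so these two are equivalent:
#
#   GenericUnivariateSelect(f_classif, mode='k_best', param=2)
#   SelectKBest(f_classif, k=2)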
|
|
import os
import py.test
from tiddlyweb.config import config
from tiddlyweb.store import Store, NoBagError, NoUserError, NoRecipeError, NoTiddlerError
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.user import User
from tiddlywebplugins.mysql3 import Base
from base64 import b64encode
#RANGE = 1000
RANGE = 10
def setup_module(module):
module.store = Store(
config['server_store'][0],
config['server_store'][1],
{'tiddlyweb.config': config}
)
# delete everything
Base.metadata.drop_all()
Base.metadata.create_all()
import warnings
warnings.simplefilter('error')
def test_make_a_bunch():
for x in xrange(RANGE):
bag_name = u'bag%s' % x
recipe_name = u'recipe%s' % x
tiddler_name = u'tiddler%s' % x
recipe_list = [(bag_name, '')]
tiddler_text = u'hey ho %s' % x
field_name = u'field%s' % x
field_name2 = u'fieldone%s' % x
tag_name = u'tag%s' % x
user_name = u'user%s' % x
user_pass = u'pass%s' % x
user_note = u'note%s' % x
user_roles = [u'rolehold', u'role%s' % x]
bag = Bag(bag_name)
bag.policy.owner = u'owner%s' % x
bag.policy.read = [u'hi%s' % x, u'andextra']
bag.policy.manage = [u'R:hi%s' % x, u'andmanage']
store.put(bag)
recipe = Recipe(recipe_name)
recipe.policy.owner = u'owner%s' % x
recipe.policy.read = [u'hi%s' % x, u'andextra']
recipe.policy.manage = [u'R:hi%s' % x, u'andmanage']
recipe.set_recipe(recipe_list)
store.put(recipe)
tiddler = Tiddler(tiddler_name, bag_name)
tiddler.text = tiddler_text
tiddler.fields[field_name] = field_name
tiddler.fields[field_name2] = field_name2
tiddler.fields['server.host'] = 'gunky'
tiddler.tags = [tag_name]
store.put(tiddler)
store.put(tiddler)
user = User(user_name)
user.set_password(user_pass)
user.note = user_note
for role in user_roles:
user.add_role(role)
store.put(user)
bags = [bag.name for bag in store.list_bags()]
recipes = [recipe.name for recipe in store.list_recipes()]
users = [user.usersign for user in store.list_users()]
assert len(bags) == RANGE
assert len(recipes) == RANGE
assert len(users) == RANGE
for x in xrange(RANGE):
bname = 'bag%s' % x
rname = 'recipe%s' % x
uname = 'user%s' % x
assert bname in bags
assert rname in recipes
assert uname in users
tiddler = store.get(Tiddler(u'tiddler0', u'bag0'))
assert tiddler.fields['field0'] == 'field0'
assert tiddler.fields['fieldone0'] == 'fieldone0'
bag = Bag(u'bag0')
bag = store.get(bag)
tiddlers = []
for tiddler in store.list_bag_tiddlers(bag):
tiddlers.append(store.get(tiddler))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'tiddler0'
assert tiddlers[0].fields['field0'] == 'field0'
assert tiddlers[0].fields['fieldone0'] == 'fieldone0'
assert tiddlers[0].tags == ['tag0']
assert sorted(bag.policy.read) == ['andextra', 'hi0']
assert sorted(bag.policy.manage) == ['R:hi0', u'andmanage']
assert bag.policy.owner == 'owner0'
user = User(u'user1')
user = store.get(user)
assert user.usersign == 'user1'
assert user.check_password('pass1')
assert user.note == 'note1'
assert 'role1' in user.list_roles()
assert 'rolehold' in user.list_roles()
recipe = Recipe(u'recipe2')
recipe = store.get(recipe)
assert recipe.name == 'recipe2'
bags = [bag_name for bag_name, filter in recipe.get_recipe()]
assert len(bags) == 1
assert 'bag2' in bags
assert sorted(recipe.policy.read) == ['andextra', 'hi2']
assert sorted(recipe.policy.manage) == ['R:hi2', u'andmanage']
assert recipe.policy.owner == 'owner2'
recipe.policy.manage = [u'andmanage']
store.put(recipe)
    recipe = Recipe(u'recipe2')
recipe = store.get(recipe)
assert recipe.policy.manage == [u'andmanage']
# delete the above things
store.delete(bag)
py.test.raises(NoBagError, 'store.delete(bag)')
py.test.raises(NoBagError, 'store.get(bag)')
store.delete(recipe)
py.test.raises(NoRecipeError, 'store.delete(recipe)')
py.test.raises(NoRecipeError, 'store.get(recipe)')
store.delete(user)
py.test.raises(NoUserError, 'store.delete(user)')
py.test.raises(NoUserError, 'store.get(user)')
tiddler = Tiddler(u'tiddler9', u'bag9')
store.get(tiddler)
assert tiddler.bag == 'bag9'
assert tiddler.text == 'hey ho 9'
assert tiddler.tags == ['tag9']
assert tiddler.fields['field9'] == 'field9'
assert 'server.host' not in tiddler.fields
store.delete(tiddler)
py.test.raises(NoTiddlerError, 'store.delete(tiddler)')
py.test.raises(NoTiddlerError, 'store.get(tiddler)')
def test_binary_tiddler():
tiddler = Tiddler(u'binary', u'bag8')
tiddler.type = 'application/binary'
tiddler.text = u'not really binary'
store.put(tiddler)
new_tiddler = Tiddler(u'binary', u'bag8')
new_tiddler = store.get(new_tiddler)
assert new_tiddler.title == 'binary'
assert new_tiddler.type == 'application/binary'
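    # the store is expected to base64-encode binary tiddler text in place
    # on put, hence the assertion against the original tiddler object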
assert tiddler.text == b64encode('not really binary')
def test_handle_empty_policy():
bag = Bag(u'empty')
store.put(bag)
new_bag = store.get(Bag(u'empty'))
assert new_bag.policy.read == []
assert new_bag.policy.manage == []
assert new_bag.policy.create == []
assert new_bag.policy.write == []
assert new_bag.policy.accept == []
assert new_bag.policy.owner == None
def test_tiddler_revisions():
bag_name = u'bag8'
for i in xrange(20):
tiddler = Tiddler(u'oh hi', bag_name)
tiddler.text = u'%s times we go' % i
tiddler.fields[u'%s' % i] = u'%s' % i
tiddler.fields[u'other%s' % i] = u'%s' % i
tiddler.fields[u'carutther%s' % i] = u'x%s' % i
store.put(tiddler)
revisions = store.list_tiddler_revisions(Tiddler('oh hi', bag_name))
assert len(revisions) == 20
first_revision = revisions[-1]
tiddler = Tiddler(u'oh hi', bag_name)
tiddler.revision = first_revision + 13
tiddler = store.get(tiddler)
assert tiddler.title == 'oh hi'
assert tiddler.text == '13 times we go'
assert tiddler.fields['13'] == '13'
assert tiddler.fields['other13'] == '13'
assert tiddler.fields['carutther13'] == 'x13'
assert '12' not in tiddler.fields
tiddler.revision = 90
py.test.raises(NoTiddlerError, 'store.get(tiddler)')
py.test.raises(NoTiddlerError,
'store.list_tiddler_revisions(Tiddler(u"sleepy", u"cow"))')
def test_interleaved_tiddler_revisions():
bag_name = u'bag8'
for i in xrange(20):
tiddler1 = Tiddler(u'oh yes', bag_name)
tiddler2 = Tiddler(u'oh no', bag_name)
tiddler1.text = u'%s times we yes' % i
tiddler2.text = u'%s times we no' % i
tiddler1.fields[u'%s' % i] = u'%s' % i
tiddler2.fields[u'%s' % i] = u'%s' % i
store.put(tiddler1)
store.put(tiddler2)
revisions = store.list_tiddler_revisions(Tiddler('oh yes', bag_name))
assert len(revisions) == 20
first_revision = revisions[-1]
tiddler = Tiddler('oh yes', bag_name)
tiddler.revision = first_revision + 26
tiddler = store.get(tiddler)
assert tiddler.title == 'oh yes'
assert tiddler.text == '13 times we yes'
assert tiddler.fields['13'] == '13'
assert '12' not in tiddler.fields
tiddler.revision = 9999999 # big number to avoid auto increment issues
py.test.raises(NoTiddlerError, 'store.get(tiddler)')
py.test.raises(NoTiddlerError,
'store.list_tiddler_revisions(Tiddler(u"sleepy", u"cow"))')
def test_tiddler_no_bag():
tiddler = Tiddler(u'hi')
py.test.raises(NoBagError, 'store.put(tiddler)')
def test_list_tiddlers_no_bag():
bag = Bag(u'carne')
try:
py.test.raises(NoBagError, 'store.list_bag_tiddlers(bag).next()')
except AttributeError:
assert True
def xtest_case_sensitive():
bag = Bag(u'testcs')
store.put(bag)
tiddlera = Tiddler(u'testtiddler', u'testcs')
tiddlera.text = u'a'
store.put(tiddlera)
tiddlerb = Tiddler(u'TestTiddler', u'testcs')
tiddlerb.text = u'b'
store.put(tiddlerb)
tiddlerc = Tiddler(u'TestTiddler', u'testcs')
tiddlerc = store.get(tiddlerc)
assert tiddlerc.text == u'b'
tiddlerd = Tiddler(u'testtiddler', u'testcs')
tiddlerd = store.get(tiddlerd)
assert tiddlerd.text == u'a'
def test_2bag_policy():
bag = Bag(u'pone')
bag.policy.read = [u'cdent']
bag.policy.write = [u'cdent']
store.put(bag)
bag = Bag(u'ptwo')
bag.policy.read = [u'cdent', u'fnd']
bag.policy.write = [u'cdent']
store.put(bag)
pone = store.get(Bag(u'pone'))
ptwo = store.get(Bag(u'ptwo'))
assert pone.policy.read == [u'cdent']
assert pone.policy.write == [u'cdent']
assert sorted(ptwo.policy.read) == [u'cdent', u'fnd']
assert ptwo.policy.write == [u'cdent']
store.delete(pone)
ptwo = store.get(Bag(u'ptwo'))
assert sorted(ptwo.policy.read) == [u'cdent', u'fnd']
assert ptwo.policy.write == [u'cdent']
bag = Bag(u'pone')
bag.policy.read = [u'cdent']
bag.policy.write = [u'cdent']
store.put(bag)
pone = store.get(Bag(u'pone'))
assert pone.policy.read == [u'cdent']
assert pone.policy.write == [u'cdent']
pone.policy.read.append(u'fnd')
store.put(pone)
pone = store.get(Bag(u'pone'))
assert sorted(pone.policy.read) == [u'cdent', u'fnd']
def test_2recipe_policy():
recipe = Recipe(u'pone')
recipe.policy.read = [u'cdent']
recipe.policy.write = [u'cdent']
store.put(recipe)
recipe = Recipe(u'ptwo')
recipe.policy.read = [u'cdent', u'fnd']
recipe.policy.write = [u'cdent']
store.put(recipe)
pone = store.get(Recipe(u'pone'))
ptwo = store.get(Recipe(u'ptwo'))
assert pone.policy.read == [u'cdent']
assert pone.policy.write == [u'cdent']
assert sorted(ptwo.policy.read) == [u'cdent', u'fnd']
assert ptwo.policy.write == [u'cdent']
store.delete(pone)
ptwo = store.get(Recipe(u'ptwo'))
assert sorted(ptwo.policy.read) == [u'cdent', u'fnd']
assert ptwo.policy.write == [u'cdent']
recipe = Recipe(u'pone')
recipe.policy.read = [u'cdent']
recipe.policy.write = [u'cdent']
store.put(recipe)
pone = store.get(Recipe(u'pone'))
assert pone.policy.read == [u'cdent']
assert pone.policy.write == [u'cdent']
pone.policy.read.append(u'fnd')
store.put(pone)
pone = store.get(Recipe(u'pone'))
assert sorted(pone.policy.read) == [u'cdent', u'fnd']
def test_revisions_deletions():
tiddler = Tiddler(u'tone', u'pone')
tiddler.text = u'revision1'
tiddler.tags = [u'1', u'2']
store.put(tiddler)
tiddler.text = u'revision2'
tiddler.tags = [u'3', u'4']
store.put(tiddler)
revisions = store.list_tiddler_revisions(tiddler)
assert len(revisions) == 2
store.delete(tiddler)
py.test.raises(NoTiddlerError, 'store.list_tiddler_revisions(tiddler)')
def test_bag_deletes_tiddlers():
tiddler = Tiddler(u'tone', u'pone')
tiddler.text = u''
store.put(tiddler)
tiddler = Tiddler(u'uone', u'pone')
tiddler.text = u''
store.put(tiddler)
bag = Bag(u'pone')
tiddlers = list(store.list_bag_tiddlers(bag))
assert len(tiddlers) == 2
store.delete(bag)
bag = Bag(u'pone')
py.test.raises(NoBagError, 'list(store.list_bag_tiddlers(bag))')
py.test.raises(NoTiddlerError, 'store.list_tiddler_revisions(tiddler)')
def test_multi_same_tag_tiddler():
bag = Bag(u'holder')
store.put(bag)
tiddler = Tiddler(u'me', u'holder')
tiddler.text = 'hi'
tiddler.tags = ['foo']
store.put(tiddler)
tiddler2 = Tiddler(u'me', u'holder')
tiddler2 = store.get(tiddler2)
tiddler2.tags.append('bar')
tiddler2.tags.append('bar')
store.put(tiddler2)
tiddler3 = store.get(Tiddler(u'me', u'holder'))
assert sorted(tiddler3.tags) == ['bar', 'foo']
def test_multi_role_user():
user = User(u'cdent')
user.add_role(u'cow')
user.add_role(u'cow')
store.put(user)
user2 = store.get(User(u'cdent'))
assert list(user2.roles) == ['cow']
def test_long_tiddler_title():
long_title = u'I would not do that if I were you, it might have consequences more than dire than you could possibly imagine. So dire you might have an oh no moment something severe.'
tiddler1 = Tiddler(long_title + '1', u'holder')
tiddler1.text = 'tiddler1'
tiddler2 = Tiddler(long_title + '1', u'holder')
tiddler2.text = 'tiddler2'
py.test.raises(TypeError, 'store.put(tiddler1)')
py.test.raises(TypeError, 'store.put(tiddler2)')
py.test.raises(NoTiddlerError, 'store.get(tiddler1)')
py.test.raises(NoTiddlerError, 'store.get(tiddler2)')
@py.test.mark.xfail
def test_emoji_title():
"""
    We expect this to fail because we're using (a) an old MySQL version
    (b) without the utf8mb4 encoding.
    See: https://github.com/TiddlySpace/tiddlyspace/issues/1033
    The fix is to use MySQL 5.5 or beyond.
    """
# emoji smiley of some sort
title = '\xF0\x9F\x98\x97'.decode('utf-8')
store.put(Bag(title))
tiddler = Tiddler(title, title)
tiddler.text = u'some stuff and zomg %s' % title
tiddler.tags = [title]
tiddler.fields[title] = title
store.put(tiddler)
tiddler2 = store.get(Tiddler(title, title))
assert tiddler2.title == title
assert tiddler2.text == tiddler.text
assert tiddler2.tags == tiddler.tags
assert tiddler2.tags[0] == title
assert tiddler2.fields[title] == tiddler.fields[title]
assert tiddler2.fields[title] == title
|
|
import os
import numpy as np
import pandas as pd
from lingam.base import _BaseLiNGAM
from lingam.causal_effect import CausalEffect
class DummyLiNGAM(_BaseLiNGAM):
def __init__(self, random_state=None, causal_order=None, adjacency_matrix=None):
super().__init__(random_state)
self._causal_order = causal_order
self._adjacency_matrix = adjacency_matrix
def fit(self, X):
pass
class DummyPrediction(object):
def __init__(self, reval_predict=None, coef=None, intercept=None):
self._reval_predict = reval_predict
self._coef = coef
self._intercept = intercept
def predict(self, X):
return self._reval_predict
@property
def coef_(self):
return self._coef
@property
def intercept_(self):
return self._intercept
class DummyPrediction2(object):
def __init__(self, reval_predict=None, coef=None, intercept=None):
self._reval_predict = reval_predict
self._coef = coef
self._intercept = intercept
def predict_proba(self, X):
return self._reval_predict
@property
def coef_(self):
return self._coef
@property
def intercept_(self):
return self._intercept
def test_estimate_effects_on_prediction_success():
# causal direction: x0 --> x1, x0 --> x2, x1 --> x2
x0 = np.random.uniform(size=1000)
x1 = -1.0*x0 + np.random.uniform(size=1000)
x2 = 3.0*x0 + 2.0*x1 + np.random.uniform(size=1000)
X = pd.DataFrame(np.array([x0, x1, x2]).T, columns=['x0', 'x1', 'x2'])
# specified LiNGAM for causal model
model = DummyLiNGAM(causal_order=[0, 1, 2],
adjacency_matrix=np.array([
[ 0., 0., 0.],
[-1., 0., 0.],
[ 3., 2., 0.],
]))
ce = CausalEffect(model)
pred_model = DummyPrediction(reval_predict=np.array([0]))
effects = ce.estimate_effects_on_prediction(X, 2, pred_model)
assert effects.shape == (3, 2)
# retry
effects = ce.estimate_effects_on_prediction(X, 2, pred_model)
En = np.array([0.1, 0.1, 0.1])
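    # With intervention do(x0 = 1) and error terms En = [0.1, 0.1, 0.1], the
    # linear SEM propagates as x0 = 1, x1 = -1*1 + 0.1 = -0.9 and
    # x2 = 3*1 + 2*(-0.9) + 0.1 = 1.3, which the three asserts below check.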
effects = ce._get_propagated_effects(En, 0, 1)
assert effects[0] == 1
assert effects[1] == -0.9
assert effects[2] == (3.0+(-1.8)+0.1)
# specified np.array for causal model
ce = CausalEffect(np.array([[0., -1., 0.], [ 0., 0., 0.], [ 3., 2., 0.]]))
pred_model = DummyPrediction(reval_predict=np.array([0]))
effects = ce.estimate_effects_on_prediction(X, 2, pred_model)
assert effects.shape == (3, 2)
# specified list for causal model
ce = CausalEffect([[0., -1., 0.], [ 0., 0., 0.], [ 3., 2., 0.]])
pred_model = DummyPrediction(reval_predict=np.array([0]))
effects = ce.estimate_effects_on_prediction(X, 2, pred_model)
assert effects.shape == (3, 2)
# predict_proba
ce = CausalEffect([[0., -1., 0.], [ 0., 0., 0.], [ 3., 2., 0.]])
pred_model = DummyPrediction2(reval_predict=np.array([[0, 0]]))
effects = ce.estimate_effects_on_prediction(X, 2, pred_model)
assert effects.shape == (3, 2)
def test_estimate_effects_on_prediction_invalid_input():
# causal direction: x0 --> x1, x0 --> x2, x1 --> x2
x0 = np.random.uniform(size=1000)
x1 = -1.0*x0 + np.random.uniform(size=1000)
x2 = 3.0*x0 + 2.0*x1 + np.random.uniform(size=1000)
X = pd.DataFrame(np.array([x0, x1, x2]).T, columns=['x0', 'x1', 'x2'])
# Not causal model
try:
ce = CausalEffect(1)
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
# Not matrix
try:
ce = CausalEffect(np.array([0, 1, 2]))
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
try:
ce = CausalEffect([0, 1, 2])
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
# Not square matrix
try:
ce = CausalEffect(np.array([[0, 1, 2], [0, 0, 0]]))
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
try:
ce = CausalEffect([[0, 1, 2], [0, 0, 0]])
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
# Not triangular matrix
try:
ce = CausalEffect(np.array([[0, 1, 2], [1, 1, 1], [2, 3, 4]]))
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
try:
ce = CausalEffect([[0, 1, 2], [1, 1, 1], [2, 3, 4]])
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
# Not prediction model
try:
ce = CausalEffect(np.array([[0, 0, 0], [1, 0, 0], [2, 2, 0]]))
pred_model = [0]
ce.estimate_effects_on_prediction(X, 2, pred_model)
except ValueError:
pass
else:
raise AssertionError
def test_estimate_optimal_intervention_success():
# causal direction: x0 --> x1, x0 --> x2, x1 --> x2
x0 = np.random.uniform(size=1000)
x1 = -1.0*x0 + np.random.uniform(size=1000)
x2 = 3.0*x0 + 2.0*x1 + np.random.uniform(size=1000)
X = pd.DataFrame(np.array([x0, x1, x2]).T, columns=['x0', 'x1', 'x2'])
# specified LiNGAM for causal model
model = DummyLiNGAM(causal_order=[0, 1, 2],
adjacency_matrix=np.array([
[ 0., 0., 0.],
[-1., 0., 0.],
[ 3., 2., 0.],
]))
ce = CausalEffect(model)
pred_model = DummyPrediction(reval_predict=np.array([1]), coef=np.array([1, 2]), intercept=0)
optimal_intervention = ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
assert optimal_intervention > 0.95 and optimal_intervention < 1.05
# specified np.array for causal model
ce = CausalEffect(np.array([[ 0., 0., 0.], [-1., 0., 0.], [ 3., 2., 0.]]))
pred_model = DummyPrediction(reval_predict=np.array([1]), coef=np.array([1, 2]), intercept=0)
optimal_intervention = ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
assert optimal_intervention > 0.95 and optimal_intervention < 1.05
# specified list for causal model
ce = CausalEffect([[ 0., 0., 0.], [-1., 0., 0.], [ 3., 2., 0.]])
pred_model = DummyPrediction(reval_predict=np.array([1]), coef=np.array([1, 2]), intercept=0)
optimal_intervention = ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
assert optimal_intervention > 0.95 and optimal_intervention < 1.05
def test_estimate_optimal_intervention_invalid_input():
# causal direction: x0 --> x1, x0 --> x2, x1 --> x2
x0 = np.random.uniform(size=1000)
x1 = -1.0*x0 + np.random.uniform(size=1000)
x2 = 3.0*x0 + 2.0*x1 + np.random.uniform(size=1000)
X = pd.DataFrame(np.array([x0, x1, x2]).T, columns=['x0', 'x1', 'x2'])
# Not causal model
try:
ce = CausalEffect(1)
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
# Not matrix
try:
ce = CausalEffect(np.array([0, 1, 2]))
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
try:
ce = CausalEffect([0, 1, 2])
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
# Not square matrix
try:
ce = CausalEffect(np.array([[0, 1, 2], [0, 0, 0]]))
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
try:
ce = CausalEffect([[0, 1, 2], [0, 0, 0]])
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
# Not triangular matrix
try:
ce = CausalEffect(np.array([[0, 1, 2], [1, 1, 1], [2, 3, 4]]))
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
try:
ce = CausalEffect([[0, 1, 2], [1, 1, 1], [2, 3, 4]])
pred_model = DummyPrediction(reval_predict=np.array([0]))
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
# Not prediction model
try:
ce = CausalEffect(np.array([[0, 0, 0], [1, 0, 0], [2, 2, 0]]))
pred_model = [0]
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
# predict_proba
try:
ce = CausalEffect([[ 0., 0., 0.], [-1., 0., 0.], [ 3., 2., 0.]])
pred_model = DummyPrediction2(reval_predict=np.array([[1, 1]]), coef=np.array([1, 2]), intercept=0)
ce.estimate_optimal_intervention(X, 2, pred_model, 0, 0)
except ValueError:
pass
else:
raise AssertionError
|
|
#!/usr/bin/env python
from __future__ import print_function
from indic_transliteration import sanscript
from indic_transliteration import detect
from sanskrit_parser.util import normalization
from contextlib import contextmanager
import logging
import six
# Wrap scheme names defined in sanscript
BENGALI = sanscript.BENGALI
DEVANAGARI = sanscript.DEVANAGARI
GUJARATI = sanscript.GUJARATI
GURMUKHI = sanscript.GURMUKHI
KANNADA = sanscript.KANNADA
MALAYALAM = sanscript.MALAYALAM
ORIYA = sanscript.ORIYA
TAMIL = sanscript.TAMIL
TELUGU = sanscript.TELUGU
HK = sanscript.HK
IAST = sanscript.IAST
ITRANS = sanscript.ITRANS
KOLKATA = sanscript.KOLKATA
SLP1 = sanscript.SLP1
VELTHUIS = sanscript.VELTHUIS
WX = sanscript.WX
# Dict defined so autodetect can work
SCHEMES = {
'Bengali': BENGALI,
'Devanagari': DEVANAGARI,
'Gujarati': GUJARATI,
'Gurmukhi': GURMUKHI,
'Kannada': KANNADA,
'Malayalam': MALAYALAM,
'Oriya': ORIYA,
'Tamil': TAMIL,
'Telugu': TELUGU,
'HK': HK,
'IAST': IAST,
'ITRANS': ITRANS,
'Kolkata': KOLKATA,
'SLP1': SLP1,
'Velthuis': VELTHUIS,
'WX': WX
}
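# detect.detect() returns one of the keys above as a plain string, which this
# dict maps back to the corresponding sanscript scheme constant (see
# SanskritString.__init__ below).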
logger = logging.getLogger(__name__)
denormalize = False
class SanskritString(object):
""" Sanskrit String Class: Base of the class hierarchy
Attributes:
thing(str) : thing to be represented
encoding(str): SanskritBase encoding of thing as passed (eg: SanskritBase.HK, SanskritBase.DEVANAGARI)
Args:
thing(str): As above
encoding(str): As above
"""
def __init__(self, thing, encoding=None, unicode_encoding='utf-8'):
assert isinstance(thing, six.string_types)
# Encode early, unicode everywhere, decode late is the philosophy
# However, we need to accept both unicode and non unicode strings
# We are udAramatiH
if isinstance(thing, six.text_type):
self.thing = thing
else:
self.thing = six.text_type(thing, unicode_encoding)
if encoding is None:
# Autodetect Encoding
encoding = SCHEMES[detect.detect(self.thing)]
if encoding != SLP1:
# Convert to SLP1
self.thing = sanscript.transliterate(self.thing, encoding, SLP1)
# At this point, we are guaranteed that internal
# representation is in SLP1
def transcoded(self, encoding=None, strict_io=True):
""" Return a transcoded version of self
Args:
encoding(SanskritObject.Scheme):
Returns:
str: transcoded version
"""
s = self.thing
if not strict_io:
s = normalization.denormalize(s)
return sanscript.transliterate(s, SLP1, encoding)
def canonical(self, strict_io=True):
""" Return canonical transcoding (SLP1) of self
"""
return self.transcoded(SLP1, strict_io)
def devanagari(self, strict_io=True):
""" Return devanagari transcoding of self
"""
return self.transcoded(DEVANAGARI, strict_io)
# Updates internal string, leaves everything else alone
# Not to be used in all cases, as this is very limited
def update(self, s, encoding=None):
self.thing = s
if encoding is not None:
self.encoding = encoding
def __str__(self):
global denormalize
s = self.transcoded(SLP1)
if denormalize:
s = normalization.denormalize(s)
return s
def __repr__(self):
return str(self)
def __getitem__(self, i):
return self.canonical()[i]
def __len__(self):
return len(self.canonical())
class SanskritImmutableString(SanskritString):
""" Immutable version of SanskritString
"""
def __init__(self, thing=None, encoding=None, unicode_encoding='utf-8'):
super().__init__(thing, encoding, unicode_encoding)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
class SanskritNormalizedString(SanskritString):
""" SanskritString plus Normalization of input
"""
def __init__(self, thing=None, encoding=None, unicode_encoding='utf-8',
strict_io=True, replace_ending_visarga='s'):
super().__init__(thing, encoding, unicode_encoding)
if not strict_io:
# Normalize
logger.debug("Before normalization: %s", self.thing)
tmp = normalization.normalize(self.thing)
if replace_ending_visarga == 's':
self.thing = normalization.replace_ending_visarga_s(tmp)
elif replace_ending_visarga == 'r':
self.thing = normalization.replace_ending_visarga_r(tmp)
else:
self.thing = tmp
# Lazy Anusvaras (see issue #103)
try:
self.thing = sanscript.SCHEMES[sanscript.SLP1].fix_lazy_anusvaara(self.thing)
except (NameError, AttributeError):
print("Not fixing lazy anusvaras, you probably have an older version of indic_transliteration")
logger.debug("After normalization: %s", self.thing)
class SanskritObject(SanskritNormalizedString):
""" Sanskrit Object Class: Derived From SanskritString
Attributes:
"""
def __init__(self, thing=None, encoding=None, unicode_encoding='utf-8',
strict_io=True, replace_ending_visarga='s'):
super().__init__(thing, encoding, unicode_encoding, strict_io, replace_ending_visarga)
# Tags will go here as
self.tags = []
def setMorphologicalTags(self, t):
""" Set Morphological Tags on Sanskrit Object
        Args:
t (list): List of morphological tags
"""
self.tags.extend(t)
return self.tags
def getMorphologicalTags(self):
""" Morphological Tags on object """
return self.tags
@contextmanager
def outputctx(strict_io):
    global denormalize
    save_denormalize = denormalize
    denormalize = not strict_io
    try:
        yield
    finally:
        # restore the global flag even if the body raises
        denormalize = save_denormalize
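# Illustrative use of outputctx (not exercised elsewhere in this module):
# temporarily emit denormalized output without leaving the global flag set.
#
#     with outputctx(strict_io=False):
#         print(SanskritObject('rAmaH', encoding=SLP1))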
if __name__ == "__main__":
import argparse
def getArgs():
"""
Argparse routine.
Returns args variable
"""
# Parser Setup
parser = argparse.ArgumentParser(description='SanskritObject')
# String to encode
parser.add_argument('data', nargs="?", type=str, default="idam adbhutam")
# Input Encoding (autodetect by default)
parser.add_argument('--input-encoding', type=str, default=None)
# Output Encoding (Devanagari by default)
parser.add_argument('--output-encoding', type=str, default="Devanagari")
return parser.parse_args()
def main():
args = getArgs()
print(args.data)
if args.input_encoding is None:
ie = None
else:
ie = SCHEMES[args.input_encoding]
oe = SCHEMES[args.output_encoding]
s = SanskritObject(args.data, ie)
print(s.transcoded(oe))
main()
|
|
# Unit 5: Probability in the game of Darts
"""
In the game of darts, players throw darts at a board to score points.
The circular board has a 'bulls-eye' in the center and 20 slices
called sections, numbered 1 to 20, radiating out from the bulls-eye.
The board is also divided into concentric rings. The bulls-eye has
two rings: an outer 'single' ring and an inner 'double' ring. Each
section is divided into 4 rings: starting at the center we have a
thick single ring, a thin triple ring, another thick single ring, and
a thin double ring. A ring/section combination is called a 'target';
they have names like 'S20', 'D20' and 'T20' for single, double, and
triple 20, respectively; these score 20, 40, and 60 points. The
bulls-eyes are named 'SB' and 'DB', worth 25 and 50 points
respectively. Illustration (png image): http://goo.gl/i7XJ9
There are several variants of darts play; in the game called '501',
each player throws three darts per turn, adding up points until they
total exactly 501. However, the final dart must be in a double ring.
Your first task is to write the function double_out(total), which will
output a list of 1 to 3 darts that add up to total, with the
restriction that the final dart is a double. See test_darts() for
examples. Return None if there is no list that achieves the total.
Often there are several ways to achieve a total. You must return a
shortest possible list, but you have your choice of which one. For
example, for total=100, you can choose ['T20', 'D20'] or ['DB', 'DB']
but you cannot choose ['T20', 'D10', 'D10'].
"""
def test_darts():
"Test the double_out function."
assert double_out(170) == ['T20', 'T20', 'DB']
assert double_out(171) == None
assert double_out(100) in (['T20', 'D20'], ['DB', 'DB'])
"""
My strategy: I decided to choose the result that has the highest valued
target(s) first, e.g. always take T20 on the first dart if we can achieve
a solution that way. If not, try T19 first, and so on. At first I thought
I would need three passes: first try to solve with one dart, then with two,
then with three. But I realized that if we include 0 as a possible dart
value, and always try the 0 first, then we get the effect of having three
passes, but we only have to code one pass. So I created ordered_points as
a list of all possible scores that a single dart can achieve, with 0 first,
and then descending: [0, 60, 57, ..., 1]. I iterate dart1 and dart2 over
that; then dart3 must be whatever is left over to add up to total. If
dart3 is a valid element of points, then we have a solution. But the
solution is a list of numbers, like [0, 60, 40]; we need to transform that
into a list of target names, like ['T20', 'D20']. We do that by defining dart(d)
to get the name of a target that scores d. When there are several choices,
we must choose a double for the last dart, but for the others I prefer the
easiest targets first: 'S' is easiest, then 'T', then 'D'.
"""
SINGLES = set(range(1, 21) + [25])
DOUBLES = set([n*2 for n in SINGLES])
TRIPLES = set([n*3 for n in SINGLES if n != 25])
SCORES = sorted(SINGLES | DOUBLES | TRIPLES, reverse=True)
# Include 0 as a possible dart, as described in the strategy above: the
# search then tries all one- and two-dart finishes before any genuine
# three-dart combination, so the first solution found is a shortest one.
ORDERED_POINTS = [0] + SCORES
def double_out(total):
    """Return a shortest possible list of targets that add to total,
    where the length <= 3 and the final element is a double.
    If there is no solution, return None."""
    def throw(scores):
        current = sum(scores)
        if current > total:
            return None
        if current == total:
            return scores if scores[-1] in DOUBLES else None
        if len(scores) < 3:
            for d in ORDERED_POINTS:
                ret = throw(scores + [d])
                if ret is not None:
                    return ret
        return None
    scores = throw([])
    if scores is None:
        return None
    scores = [s for s in scores if s > 0]  # drop the padding zeros
    return [dart(s, i == len(scores) - 1) for i, s in enumerate(scores)]
def dart(score, is_last):
    if score in DOUBLES and is_last:
        return 'D' + notation(score // 2)
    elif score in SINGLES:
        return 'S' + notation(score)
    elif score in TRIPLES:
        return 'T' + notation(score // 3)
    elif score in DOUBLES:
        return 'D' + notation(score // 2)
    else:
        raise ValueError('Invalid score')
def notation(number):
return str(number) if number != 25 else 'B'
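# A few extra sanity checks beyond the assignment's test_darts (illustrative;
# 159, 162 and 163 are among the classic totals with no three-dart finish):
def test_darts_extra():
    assert double_out(50) == ['DB']
    assert double_out(2) == ['D1']
    assert all(double_out(n) is None for n in (1, 159, 162, 163))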
"""
It is easy enough to say "170 points? Easy! Just hit T20, T20, DB."
But, at least for me, it is much harder to actually execute the plan
and hit each target. In this second half of the question, we
investigate what happens if the dart-thrower is not 100% accurate.
We will use a wrong (but still useful) model of inaccuracy. A player
has a single number from 0 to 1 that characterizes his/her miss rate.
If miss=0.0, that means the player hits the target every time.
But if miss is, say, 0.1, then the player misses the section s/he
is aiming at 10% of the time, and also (independently) misses the thin
double or triple ring 10% of the time. Where do the misses go?
Here's the model:
First, for ring accuracy. If you aim for the triple ring, all the
misses go to a single ring (some to the inner one, some to the outer
one, but the model doesn't distinguish between these). If you aim for
the double ring (at the edge of the board), half the misses (e.g. 0.05
if miss=0.1) go to the single ring, and half off the board. (We will
agree to call the off-the-board 'target' by the name 'OFF'.) If you
aim for a thick single ring, it is about 5 times thicker than the thin
rings, so your miss ratio is reduced to 1/5th, and of these, half go to
the double ring and half to the triple. So with miss=0.1, 0.01 will go
to each of the double and triple ring. Finally, for the bulls-eyes. If
you aim for the single bull, 1/4 of your misses go to the double bull and
3/4 to the single ring. If you aim for the double bull, it is tiny, so
your miss rate is tripled; of that, 2/3 goes to the single ring and 1/3
to the single bull ring.
Now, for section accuracy. Half your miss rate goes one section clockwise
and half one section counter-clockwise from your target. The clockwise
order of sections is:
20 1 18 4 13 6 10 15 2 17 3 19 7 16 8 11 14 9 12 5
If you aim for the bull (single or double) and miss on rings, then the
section you end up on is equally possible among all 20 sections. But
independent of that you can also miss on sections; again such a miss
is equally likely to go to any section and should be recorded as being
in the single ring.
You will need to build a model for these probabilities, and define the
function outcome(target, miss), which takes a target (like 'T20') and
a miss ratio (like 0.1) and returns a dict of {target: probability}
pairs indicating the possible outcomes. You will also define
best_target(miss) which, for a given miss ratio, returns the target
with the highest expected score.
If you are very ambitious, you can try to find the optimal strategy for
accuracy-limited darts: given a state defined by your total score
needed and the number of darts remaining in your 3-dart turn, return
the target that minimizes the expected number of total 3-dart turns
(not the number of darts) required to reach the total. This is harder
than Pig for several reasons: there are many outcomes, so the search space
is large; also, it is always possible to miss a double, and thus there is
no guarantee that the game will end in a finite number of moves.
"""
SECTIONS = '20 1 18 4 13 6 10 15 2 17 3 19 7 16 8 11 14 9 12 5'.split()
ADJACENTS = {}
for i in range(1, len(SECTIONS)-1):
left, x, right = SECTIONS[i-1:i+2]
ADJACENTS[x] = [left, right]
ADJACENTS['20'] = ['5', '1']
ADJACENTS['5'] = ['12', '20']
ADJACENTS['B'] = SECTIONS
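# Sanity check of the wheel layout: 20 sits between 5 and 1 at the top of the
# board, and 1 between 20 and 18.
assert ADJACENTS['20'] == ['5', '1'] and ADJACENTS['1'] == ['20', '18']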
from collections import defaultdict
def outcome(target, miss):
"Return a probability distribution of [(target, probability)] pairs."
probabilities = [(t2, p2) for t, p in ring_miss(target, miss)
for t2, p2 in section_miss(t, p, miss)]
table = defaultdict(float)
for t, p in probabilities:
table[t] += p
return dict(table)
def ring_miss(target, miss):
    ring, section = target[0], target[1:]
    bull_section_miss_targets = ['S'+a for a in ADJACENTS['B']]
    miss_events = ([('D'+section, 0.25*miss)] +
                   unidist(bull_section_miss_targets, 0.75*miss) if target == 'SB' else
                   [('S'+section, miss)] +
                   unidist(bull_section_miss_targets, 2*miss) if target == 'DB' else
                   # half the double-ring misses hit the single ring,
                   # the other half fall off the board
                   [('S'+section, 0.5*miss), ('OFF', 0.5*miss)] if ring == 'D' else
                   [('S'+section, miss)] if ring == 'T' else
                   [('D'+section, 0.1*miss), ('T'+section, 0.1*miss)])
    pass_event = (target, 1-sum(p for _, p in miss_events))
    return [pass_event] + miss_events
def section_miss(target, p, miss):
    if target == 'OFF':
        return [(target, p)]  # off the board: no section left to miss into
    ring, section = target[0], target[1:]
    adjacents = ADJACENTS[section]
    pass_event = (target, p*(1-miss))
    miss_ring = ring if section != 'B' else 'S'
    miss_events = unidist([miss_ring+a for a in adjacents], p*miss)
    return [pass_event] + miss_events
def unidist(cases, total_p):
p = total_p / len(cases)
return [(c, p) for c in cases]
def best_target(miss):
"Return the target that maximizes the expected score."
targets = ['SB', 'DB'] + [r+s for r in 'SDT' for s in SECTIONS]
def E(target):
table = outcome(target, miss)
return sum(score(t)*p for t, p in table.viewitems())
return max(targets, key=E)
def score(target):
    if target == 'OFF':
        return 0
    ring, section = target[0], target[1:]
    mul = dict(S=1., D=2., T=3.)[ring]
    point = int(section) if section != 'B' else 25
    return mul * point
def same_outcome(dict1, dict2):
"Two states are the same if all corresponding sets of locs are the same."
return all(abs(dict1.get(key, 0) - dict2.get(key, 0)) <= 0.0001
for key in set(dict1) | set(dict2))
def test_darts2():
assert best_target(0.0) == 'T20'
assert best_target(0.1) == 'T20'
assert best_target(0.4) == 'T19'
assert same_outcome(outcome('T20', 0.0), {'T20': 1.0})
assert same_outcome(outcome('T20', 0.1),
{'T20': 0.81, 'S1': 0.005, 'T5': 0.045,
'S5': 0.005, 'T1': 0.045, 'S20': 0.09})
assert (same_outcome(
outcome('SB', 0.2),
{'S9': 0.016, 'S8': 0.016, 'S3': 0.016, 'S2': 0.016, 'S1': 0.016,
'DB': 0.04, 'S6': 0.016, 'S5': 0.016, 'S4': 0.016, 'S20': 0.016,
'S19': 0.016, 'S18': 0.016, 'S13': 0.016, 'S12': 0.016, 'S11': 0.016,
'S10': 0.016, 'S17': 0.016, 'S16': 0.016, 'S15': 0.016, 'S14': 0.016,
'S7': 0.016, 'SB': 0.64}))
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
import urlparse
import constants
import ports
from pylib.forwarder import Forwarder
# Paths that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.DIR_SOURCE_ROOT, 'third_party'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
'src'),
os.path.join(constants.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
os.path.join(constants.DIR_SOURCE_ROOT, 'sync', 'tools', 'testserver')))
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'sync': '', # Sync uses its own script, and doesn't take a server type arg.
'tcpecho': '--tcp-echo',
'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _WaitUntil(predicate, max_attempts=5):
"""Blocks until the provided predicate (function) is true.
Returns:
Whether the provided predicate was satisfied once (before the timeout).
"""
sleep_time_sec = 0.025
  for _ in xrange(max_attempts):
if predicate():
return True
time.sleep(sleep_time_sec)
sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
return False
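# For example, _WaitUntil(lambda: ports.IsHostPortUsed(port)) polls with
# exponential backoff (25ms, 50ms, ..., capped at 1 second per sleep) and
# gives up after a bounded number of attempts.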
def _CheckPortStatus(port, expected_status):
"""Returns True if port has expected_status.
Args:
port: the port number.
expected_status: boolean of expected status.
Returns:
Returns True if the status is expected. Otherwise returns False.
"""
return _WaitUntil(lambda: ports.IsHostPortUsed(port) == expected_status)
def _CheckDevicePortStatus(adb, port):
"""Returns whether the provided port is used."""
return _WaitUntil(lambda: ports.IsDevicePortUsed(adb, port))
def _GetServerTypeCommandLine(server_type):
"""Returns the command-line by the given server type.
Args:
server_type: the server type to be used (e.g. 'http').
Returns:
A string containing the command-line argument.
"""
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
if server_type == 'udpecho':
raise Exception('Please do not run UDP echo tests because we do not have '
'a UDP forwarder tool.')
return SERVER_TYPES[server_type]
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
def __init__(self, ready_event, arguments, adb, tool):
"""Initialize TestServerThread with the following argument.
Args:
ready_event: event which will be set when the test server is ready.
arguments: dictionary of arguments to run the test server.
adb: instance of AndroidCommands.
tool: instance of runtime error detection tool.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_flag = False
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.adb = adb
self.tool = tool
    self.process = None
self.is_ready = False
self.host_port = self.arguments['port']
assert isinstance(self.host_port, int)
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.command_line = []
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
The port information is passed by the Python test server with a pipe given
by self.pipe_out. It is written as a result to |self.host_port|.
Returns:
Whether the port used by the test server was successfully fetched.
"""
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
      logging.error('Timed out waiting for the Python test server to start.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
logging.error('Failed to get length of server data.')
return False
port_json = os.read(self.pipe_in, data_length)
if not port_json:
logging.error('Failed to get server data.')
return False
logging.info('Got port json data: %s', port_json)
port_json = json.loads(port_json)
if port_json.has_key('port') and isinstance(port_json['port'], int):
self.host_port = port_json['port']
return _CheckPortStatus(self.host_port, True)
logging.error('Failed to get port information from the server data.')
return False
def _GenerateCommandLineArguments(self):
"""Generates the command line to run the test server.
Note that all options are processed by following the definitions in
testserver.py.
"""
if self.command_line:
return
# The following arguments must exist.
type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
if type_cmd:
self.command_line.append(type_cmd)
self.command_line.append('--port=%d' % self.host_port)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
self.command_line.append('--host=%s' % self.arguments['host'])
data_dir = self.arguments['data-dir'] or 'chrome/test/data'
if not os.path.isabs(data_dir):
data_dir = os.path.join(constants.DIR_SOURCE_ROOT, data_dir)
self.command_line.append('--data-dir=%s' % data_dir)
# The following arguments are optional depending on the individual test.
if self.arguments.has_key('log-to-console'):
self.command_line.append('--log-to-console')
if self.arguments.has_key('auth-token'):
self.command_line.append('--auth-token=%s' % self.arguments['auth-token'])
if self.arguments.has_key('https'):
self.command_line.append('--https')
if self.arguments.has_key('cert-and-key-file'):
self.command_line.append('--cert-and-key-file=%s' % os.path.join(
constants.DIR_SOURCE_ROOT, self.arguments['cert-and-key-file']))
if self.arguments.has_key('ocsp'):
self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
if self.arguments.has_key('https-record-resume'):
self.command_line.append('--https-record-resume')
if self.arguments.has_key('ssl-client-auth'):
self.command_line.append('--ssl-client-auth')
if self.arguments.has_key('tls-intolerant'):
self.command_line.append('--tls-intolerant=%s' %
self.arguments['tls-intolerant'])
if self.arguments.has_key('ssl-client-ca'):
for ca in self.arguments['ssl-client-ca']:
self.command_line.append('--ssl-client-ca=%s' %
os.path.join(constants.DIR_SOURCE_ROOT, ca))
if self.arguments.has_key('ssl-bulk-cipher'):
for bulk_cipher in self.arguments['ssl-bulk-cipher']:
self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)
def _CloseUnnecessaryFDsForTestServerProcess(self):
# This is required to avoid subtle deadlocks that could be caused by the
# test server child process inheriting undesirable file descriptors such as
# file lock file descriptors.
for fd in xrange(0, 1024):
if fd != self.pipe_out:
try:
os.close(fd)
except:
pass
def run(self):
logging.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = constants.DIR_SOURCE_ROOT
if self.arguments['server-type'] == 'sync':
command = [os.path.join(command, 'sync', 'tools', 'testserver',
'sync_testserver.py')] + self.command_line
else:
command = [os.path.join(command, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
logging.info('Running: %s', command)
self.process = subprocess.Popen(
command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess)
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = _CheckPortStatus(self.host_port, True)
if self.is_ready:
Forwarder.Map([(0, self.host_port)], self.adb, self.tool)
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = Forwarder.DevicePortForHostPort(self.host_port)
if device_port and _CheckDevicePortStatus(self.adb, device_port):
self.is_ready = True
self.forwarder_device_port = device_port
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
_WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
if self.process.poll() is None:
self.process.kill()
Forwarder.UnmapDevicePort(self.forwarder_device_port, self.adb)
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
logging.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
"""Blocks until the loop has finished.
Note that this must be called in another thread.
"""
if not self.process:
return
self.stop_flag = True
self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
    # Specify the content-length; without it the http(s) response is not
    # terminated properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
assert not self.server.test_server_instance
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.adb,
self.server.tool)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while starting the test server.')
def _KillTestServer(self):
"""Stops the test server instance."""
    # There should only ever be one test server at a time. This may do the
    # wrong thing if we try to start multiple test servers.
if not self.server.test_server_instance:
return
port = self.server.test_server_instance.host_port
logging.info('Handling request to kill a test server on port: %d.', port)
self.server.test_server_instance.Stop()
# Make sure the status of test server is correct before sending response.
if _CheckPortStatus(port, False):
self._SendResponse(200, 'OK', {}, 'killed')
logging.info('Test server on port %d is killed', port)
else:
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while killing the test server.')
self.server.test_server_instance = None
def do_POST(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
logging.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encountered unknown request: %s.', action)
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action for GET method is: %s.', action)
for param in params:
logging.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer()
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
# to serve the requests. We don't need to test the status of the test
# server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
logging.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encountered unknown request: %s.', action)
class SpawningServer(object):
"""The class used to start/stop a http server."""
def __init__(self, test_server_spawner_port, adb, tool):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.server.adb = adb
self.server.tool = tool
self.server.test_server_instance = None
self.server.build_type = constants.GetBuildType()
def _Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
"""Starts the test server spawner."""
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
def Stop(self):
"""Stops the test server spawner.
Also cleans the server state.
"""
self.CleanupState()
self.server.shutdown()
def CleanupState(self):
"""Cleans up the spawning server state.
This should be called if the test server spawner is reused,
to avoid sharing the test server instance.
"""
if self.server.test_server_instance:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
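# The spawner speaks a tiny HTTP protocol to the device side (see the request
# handler above): POST /start with a JSON dict of arguments launches a test
# server and returns its forwarded device port, GET /kill stops it, and
# GET /ping is a readiness probe.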
|
|
import unittest
from pbxproj import XcodeProject, PBXProvisioningTypes
from pbxproj.pbxextensions import ProjectFlags
LS_LA_COMMAND = u'ls -la'
PATH_TO_SEARCH_PATTERN = 'path/to/search/**'
class ProjectFlagsTest(unittest.TestCase):
def setUp(self):
self.obj = {
'objects': {
'0': {'isa': 'PBXProject', 'buildConfigurationList': '4a'},
'1': {'isa': 'PBXNativeTarget', 'name': 'app', 'buildConfigurationList': '3',
'buildPhases': ['compile']},
'2': {'isa': 'PBXAggregateTarget', 'name': 'report', 'buildConfigurationList': '4',
'buildPhases': ['compile']},
'3': {'isa': 'XCConfigurationList', 'buildConfigurations': ['5', '6']},
'4': {'isa': 'XCConfigurationList', 'buildConfigurations': ['7', '8']},
'4a': {'isa': 'XCConfigurationList', 'buildConfigurations': ['9', '10']},
'5': {'isa': 'XCBuildConfiguration', 'name': 'Release', 'buildSettings': {'base': 'a'}},
'6': {'isa': 'XCBuildConfiguration', 'name': 'Debug', 'id': '6'},
'7': {'isa': 'XCBuildConfiguration', 'name': 'Release', 'id': '7'},
'8': {'isa': 'XCBuildConfiguration', 'name': 'Debug', 'id': '8'},
'9': {'isa': 'XCBuildConfiguration', 'name': 'Release', 'id': '9'},
'10': {'isa': 'XCBuildConfiguration', 'name': 'Debug', 'id': '10', 'buildSettings': {'base': 'x'}},
},
'rootObject': '0'
}
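    # The fixture above wires a minimal project graph: rootObject '0'
    # (PBXProject) uses configuration list '4a' (configs '9'/'10'); target '1'
    # ('app') uses list '3' (configs '5'/'6'); target '2' ('report') uses
    # list '4' (configs '7'/'8'). Both targets share the 'compile' build phase.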
def testInit(self):
with self.assertRaisesRegex(EnvironmentError, '^This class cannot be instantiated directly'):
ProjectFlags()
def testAddFlags(self):
project = XcodeProject(self.obj)
project.add_flags('flag', '-flag')
self.assertEqual(project.objects['5'].buildSettings.flag, '-flag')
self.assertEqual(project.objects['6'].buildSettings.flag, '-flag')
self.assertEqual(project.objects['7'].buildSettings.flag, '-flag')
self.assertEqual(project.objects['8'].buildSettings.flag, '-flag')
def testAddProjectFlags(self):
project = XcodeProject(self.obj)
project.add_project_flags('flag', '-flag')
self.assertEqual(project.objects['9'].buildSettings.flag, '-flag')
self.assertEqual(project.objects['10'].buildSettings.flag, '-flag')
def testRemoveFlags(self):
project = XcodeProject(self.obj)
project.remove_flags('base', 'a')
self.assertIsNone(project.objects['5'].buildSettings['base'])
def testRemoveProjectFlags(self):
project = XcodeProject(self.obj)
project.remove_project_flags('base', 'x')
self.assertIsNone(project.objects['10'].buildSettings['base'])
def testAddOtherCFlags(self):
project = XcodeProject(self.obj)
project.add_other_cflags('-ObjC')
self.assertEqual(project.objects['5'].buildSettings.OTHER_CFLAGS, '-ObjC')
self.assertEqual(project.objects['6'].buildSettings.OTHER_CFLAGS, '-ObjC')
self.assertEqual(project.objects['7'].buildSettings.OTHER_CFLAGS, '-ObjC')
self.assertEqual(project.objects['8'].buildSettings.OTHER_CFLAGS, '-ObjC')
def testAddOtherLDFlags(self):
project = XcodeProject(self.obj)
project.add_other_ldflags('-ObjC')
self.assertEqual(project.objects['5'].buildSettings.OTHER_LDFLAGS, '-ObjC')
self.assertEqual(project.objects['6'].buildSettings.OTHER_LDFLAGS, '-ObjC')
self.assertEqual(project.objects['7'].buildSettings.OTHER_LDFLAGS, '-ObjC')
self.assertEqual(project.objects['8'].buildSettings.OTHER_LDFLAGS, '-ObjC')
def testRemoveOtherCFlags(self):
project = XcodeProject(self.obj)
project.add_other_cflags('-ObjC')
self.assertEqual(project.objects['5'].buildSettings.OTHER_CFLAGS, '-ObjC')
self.assertEqual(project.objects['6'].buildSettings.OTHER_CFLAGS, '-ObjC')
self.assertEqual(project.objects['7'].buildSettings.OTHER_CFLAGS, '-ObjC')
self.assertEqual(project.objects['8'].buildSettings.OTHER_CFLAGS, '-ObjC')
project.remove_other_cflags('-ObjC')
self.assertIsNone(project.objects['5'].buildSettings['OTHER_CFLAGS'])
self.assertIsNone(project.objects['6'].buildSettings['OTHER_CFLAGS'])
self.assertIsNone(project.objects['7'].buildSettings['OTHER_CFLAGS'])
self.assertIsNone(project.objects['8'].buildSettings['OTHER_CFLAGS'])
def testRemoveOtherLDFlags(self):
project = XcodeProject(self.obj)
project.add_other_ldflags('-ObjC')
self.assertEqual(project.objects['5'].buildSettings.OTHER_LDFLAGS, '-ObjC')
self.assertEqual(project.objects['6'].buildSettings.OTHER_LDFLAGS, '-ObjC')
self.assertEqual(project.objects['7'].buildSettings.OTHER_LDFLAGS, '-ObjC')
self.assertEqual(project.objects['8'].buildSettings.OTHER_LDFLAGS, '-ObjC')
project.remove_other_ldflags('-ObjC')
self.assertIsNone(project.objects['5'].buildSettings['OTHER_LDFLAGS'])
self.assertIsNone(project.objects['6'].buildSettings['OTHER_LDFLAGS'])
self.assertIsNone(project.objects['7'].buildSettings['OTHER_LDFLAGS'])
self.assertIsNone(project.objects['8'].buildSettings['OTHER_LDFLAGS'])
def testAddHeaderSearchPaths(self):
project = XcodeProject(self.obj)
project.add_header_search_paths('path/to/search')
self.assertEqual(project.objects['5'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['6'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['7'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['8'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
def testAddLibrarySearchPaths(self):
project = XcodeProject(self.obj)
project.add_library_search_paths('path/to/search')
self.assertEqual(project.objects['5'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['6'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['7'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['8'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
def testAddFrameworkSearchPaths(self):
project = XcodeProject(self.obj)
project.add_framework_search_paths('path/to/search')
self.assertEqual(project.objects['5'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['6'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['7'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['8'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
def testRemoveHeaderSearchPaths(self):
project = XcodeProject(self.obj)
project.add_header_search_paths('path/to/search')
self.assertEqual(project.objects['5'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['6'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['7'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['8'].buildSettings.HEADER_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
project.remove_header_search_paths(PATH_TO_SEARCH_PATTERN)
self.assertIsNone(project.objects['5'].buildSettings['HEADER_SEARCH_PATHS'])
self.assertIsNone(project.objects['6'].buildSettings['HEADER_SEARCH_PATHS'])
self.assertIsNone(project.objects['7'].buildSettings['HEADER_SEARCH_PATHS'])
self.assertIsNone(project.objects['8'].buildSettings['HEADER_SEARCH_PATHS'])
def testRemoveLibrarySearchPaths(self):
project = XcodeProject(self.obj)
project.add_library_search_paths('path/to/search')
self.assertEqual(project.objects['5'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['6'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['7'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['8'].buildSettings.LIBRARY_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
project.remove_library_search_paths(PATH_TO_SEARCH_PATTERN)
self.assertIsNone(project.objects['5'].buildSettings['LIBRARY_SEARCH_PATHS'])
self.assertIsNone(project.objects['6'].buildSettings['LIBRARY_SEARCH_PATHS'])
self.assertIsNone(project.objects['7'].buildSettings['LIBRARY_SEARCH_PATHS'])
self.assertIsNone(project.objects['8'].buildSettings['LIBRARY_SEARCH_PATHS'])
def testRemoveFrameworkSearchPaths(self):
project = XcodeProject(self.obj)
project.add_framework_search_paths('path/to/search')
self.assertEqual(project.objects['5'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['6'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['7'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
self.assertEqual(project.objects['8'].buildSettings.FRAMEWORK_SEARCH_PATHS, PATH_TO_SEARCH_PATTERN)
project.remove_framework_search_paths(PATH_TO_SEARCH_PATTERN)
self.assertIsNone(project.objects['5'].buildSettings['FRAMEWORK_SEARCH_PATHS'])
self.assertIsNone(project.objects['6'].buildSettings['FRAMEWORK_SEARCH_PATHS'])
self.assertIsNone(project.objects['7'].buildSettings['FRAMEWORK_SEARCH_PATHS'])
self.assertIsNone(project.objects['8'].buildSettings['FRAMEWORK_SEARCH_PATHS'])
def testAddRunScriptBeforeCompile(self):
project = XcodeProject(self.obj)
project.add_run_script(LS_LA_COMMAND, insert_before_compile=True)
self.assertEqual(project.objects[project.objects['1'].buildPhases[0]].shellScript, LS_LA_COMMAND)
self.assertEqual(project.objects[project.objects['2'].buildPhases[0]].shellScript, LS_LA_COMMAND)
def testAddRunScriptAfterCompile(self):
project = XcodeProject(self.obj)
project.add_run_script(LS_LA_COMMAND)
self.assertEqual(project.objects[project.objects['1'].buildPhases[1]].shellScript, LS_LA_COMMAND)
self.assertEqual(project.objects[project.objects['2'].buildPhases[1]].shellScript, LS_LA_COMMAND)
def testAddRunScriptWithInputFiles(self):
project = XcodeProject(self.obj)
script = u'ls -la ${SCRIPT_INPUT_FILE_0} ${SCRIPT_INPUT_FILE_1} > ${SCRIPT_OUTPUT_FILE_0}'
project.add_run_script(script, input_files=['a.txt', '/tmp/b.txt'], output_files=['../output.log'])
self.assertEqual(project.objects[project.objects['1'].buildPhases[1]].shellScript, script)
self.assertEqual(project.objects[project.objects['1'].buildPhases[1]].inputPaths, ['a.txt', '/tmp/b.txt'])
self.assertEqual(project.objects[project.objects['1'].buildPhases[1]].outputPaths, ['../output.log'])
self.assertEqual(project.objects[project.objects['2'].buildPhases[1]].shellScript, script)
self.assertEqual(project.objects[project.objects['2'].buildPhases[1]].inputPaths, ['a.txt', '/tmp/b.txt'])
self.assertEqual(project.objects[project.objects['2'].buildPhases[1]].outputPaths, ['../output.log'])
def testRemoveRunScript(self):
project = XcodeProject(self.obj)
project.add_run_script(LS_LA_COMMAND, insert_before_compile=True)
self.assertEqual(project.objects[project.objects['1'].buildPhases[0]].shellScript, LS_LA_COMMAND)
self.assertEqual(project.objects[project.objects['2'].buildPhases[0]].shellScript, LS_LA_COMMAND)
project.remove_run_script(LS_LA_COMMAND)
self.assertEqual(project.objects['1'].buildPhases[0], u'compile')
self.assertEqual(project.objects['2'].buildPhases[0], u'compile')
def testRemoveRunScriptNotFound(self):
project = XcodeProject(self.obj)
project.add_run_script(LS_LA_COMMAND, insert_before_compile=True)
self.assertEqual(project.objects[project.objects['1'].buildPhases[0]].shellScript, LS_LA_COMMAND)
self.assertEqual(project.objects[project.objects['2'].buildPhases[0]].shellScript, LS_LA_COMMAND)
project.remove_run_script(u'ls')
self.assertEqual(project.objects[project.objects['1'].buildPhases[0]].shellScript, LS_LA_COMMAND)
self.assertEqual(project.objects[project.objects['2'].buildPhases[0]].shellScript, LS_LA_COMMAND)
def testAddRunScriptWithoutInstallBuild(self):
project = XcodeProject(self.obj)
project.add_run_script(LS_LA_COMMAND, run_install_build=0)
self.assertEqual(project.objects[project.objects['2'].buildPhases[1]].runOnlyForDeploymentPostprocessing, 0)
def testAddRunScriptWithInstallBuild(self):
project = XcodeProject(self.obj)
project.add_run_script(LS_LA_COMMAND, run_install_build=1)
self.assertEqual(project.objects[project.objects['1'].buildPhases[1]].runOnlyForDeploymentPostprocessing, 1)
def testAddCodeSignAllTargetAllConfigurations(self):
project = XcodeProject(self.obj)
project.add_code_sign('iPhone Distribution', 'MYTEAM', '0x0x0x0x0', 'Provisioning name')
self.assertEqual(project.objects['0'].attributes.TargetAttributes[u'1'].ProvisioningStyle, PBXProvisioningTypes.MANUAL)
self.assertEqual(project.objects['0'].attributes.TargetAttributes[u'2'].ProvisioningStyle,
PBXProvisioningTypes.MANUAL)
self.assertEqual(project.objects['5'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['6'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['7'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['8'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['5'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['6'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['7'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['8'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['5'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['6'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['7'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['8'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['5'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
self.assertEqual(project.objects['6'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
self.assertEqual(project.objects['7'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
self.assertEqual(project.objects['8'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
def testAddCodeSignOneTargetAllConfigurations(self):
project = XcodeProject(self.obj)
project.add_code_sign('iPhone Distribution', 'MYTEAM', '0x0x0x0x0', 'Provisioning name', target_name='app')
self.assertEqual(project.objects['0'].attributes.TargetAttributes[u'1'].ProvisioningStyle,
PBXProvisioningTypes.MANUAL)
self.assertIsNone(project.objects['0'].attributes.TargetAttributes[u'2'])
self.assertIsNone(project.objects['7']['buildSettings'])
self.assertIsNone(project.objects['8']['buildSettings'])
self.assertEqual(project.objects['5'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['6'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['5'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['6'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['5'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['6'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['5'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
self.assertEqual(project.objects['6'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
def testAddCodeSignAllTargetOneConfigurations(self):
project = XcodeProject(self.obj)
project.add_code_sign('iPhone Distribution', 'MYTEAM', '0x0x0x0x0', 'Provisioning name', configuration_name='Release')
self.assertEqual(project.objects['0'].attributes.TargetAttributes[u'1'].ProvisioningStyle, PBXProvisioningTypes.MANUAL)
self.assertEqual(project.objects['0'].attributes.TargetAttributes[u'2'].ProvisioningStyle,
PBXProvisioningTypes.MANUAL)
self.assertIsNone(project.objects['6']['buildSettings'])
self.assertIsNone(project.objects['8']['buildSettings'])
self.assertEqual(project.objects['5'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['7'].buildSettings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'], 'iPhone Distribution')
self.assertEqual(project.objects['5'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['7'].buildSettings['DEVELOPMENT_TEAM'], 'MYTEAM')
self.assertEqual(project.objects['5'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['7'].buildSettings['PROVISIONING_PROFILE'], '0x0x0x0x0')
self.assertEqual(project.objects['5'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
self.assertEqual(project.objects['7'].buildSettings['PROVISIONING_PROFILE_SPECIFIER'], 'Provisioning name')
|
|
#!/usr/bin/env python
# GBX Challenge Reader with some salt from Xymph's PHP class
# written 2009 by Markus Ullmann <mail@markus-ullmann.de>
# License: as-is
#
# ChangeLog
# ---------
# 1.0 Initial release
import struct, os
from xml.dom.minidom import parseString
racetypes = {
-1: 'unknown',
0: 'Race',
1: 'Platform',
2: 'Puzzle',
3: 'Crazy',
5: 'Stunts'
}
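# Illustrative usage of the reader class defined below (Python 2):
#
#   track = GBXChallengeReader('A01-Race.Challenge.Gbx')
#   print track.name, track.author, racetypes.get(track.racetype, 'unknown')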
class GBXReadException(Exception):
pass
class GBXWrongFileTypeException(Exception):
pass
# our fast init confuses pylint, so
# pylint: disable-msg=W0201, R0902
class GBXChallengeReader:
"""Class to read and hold attributes of a Trackmania Challenge Gbx"""
def __init__(self, filename):
self.filename = filename
# start with values set to none so scripts don't need to do hasattr()
        for var in ['uid', 'version', 'name', 'author', 'tracktype',
                    'racetype', 'envir', 'mood', 'pub', 'authortime',
                    'goldtime', 'silvertime', 'bronzetime', 'coppers',
                    'multilap', 'unknown', 'unknown2', 'authorscore',
                    'password', 'xmlver', 'exever', 'nblaps', 'songfile',
                    'modname', 'modfile', 'thumbnail', 'comment']:
setattr(self, var, None)
self.rawxml = ''
self.parsedxml = ''
self.filehandle = None
self.getData()
# keep nadeo-colorized name here
self.full_name = unicode(self.name)
import re
        regex_format = re.compile(r'\$[gnoswilzGNOSWILZ]')
regex_colors = re.compile(r'\$.{3}')
name = regex_format.sub(u'', self.name)
name = regex_colors.sub(u'', name)
self.name = name
def ReadGBXString(self):
f = self.filehandle
(datalen,) = struct.unpack('<l', f.read(4))
if datalen <= 0 or datalen >= 0x10000:
raise GBXReadException('OutOfLengthScope')
data = f.read(datalen)
return data
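    # GBX strings are length-prefixed: a little-endian 32-bit byte count
    # followed by that many raw bytes, so '\x03\x00\x00\x00foo' reads back
    # as 'foo'.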
def getData(self):
# open file
self.filehandle = open(self.filename, mode='rb')
f = self.filehandle
# Start from 0 and seek for GBX intro header
f.seek(0)
data = f.read(5)
if data != 'GBX' + chr(6) + chr(0):
raise GBXWrongFileTypeException('GBX Header missing')
# Read GBX Type
f.seek(4, os.SEEK_CUR) # "BUCR" | "BUCE"
(data,) = struct.unpack('>L', f.read(4))
self.tracktype = '%08X' % data
if self.tracktype not in ['00300024', '00300403']:
raise GBXWrongFileTypeException('Not a GBX Track')
# GBX Version: 2/3 = TM/TMPowerUp, 4 = TMO(LevelUp)/TMS/TMN, 5 = TMU/TMF
f.seek(4, os.SEEK_CUR) # Data Block Offset
(self.version,) = struct.unpack('<L', f.read(4))
if self.version < 2 or self.version > 5:
raise GBXWrongFileTypeException('Unsupported GBX Version Format')
# get Index (marker/lengths) table
marks = {}
lengths = {}
for i in range(1, self.version+1):
(marks[i],) = struct.unpack('>L', f.read(4))
(lengths[i],) = struct.unpack('<L', f.read(4))
if self.version == 5: # clear high-bits
lengths[4] &= 0x7FFFFFFF
lengths[5] &= 0x7FFFFFFF
# start of Times/info block:
# 0x25 (TM v2), 0x2D (TMPowerUp v3), 0x35 (TMO/TMS/TMN v4), 0x3D (TMU/TMF v5)
# get count of Times/info entries (well... sorta)
# TM v2 tracks use 3, TMPowerUp v3 tracks use 4; actual count is 2 more
# oldest TMO/TMS tracks (exever="0.1.3.0-0.1.4.1") use 6-8, actual count always 8; no unknown2/ascore
# older TMS tracks (exever="0.1.4.3-6") use 9; no author score
# newer TMO/TMS tracks (exever>="0.1.4.8") and TMN/TMU/TMF tracks (exever<="2.11.4") use 10
# TMF tracks (exever>="2.11.5") use 11; with unknown3
entrycount = ord(f.read(1))
f.seek(4, os.SEEK_CUR) # Unknown1: 00 00 00 00
(self.bronzetime,) = struct.unpack('<L', f.read(4))
(self.silvertime,) = struct.unpack('<L', f.read(4))
(self.goldtime,) = struct.unpack('<L', f.read(4))
(self.authortime,) = struct.unpack('<L', f.read(4))
if self.version >= 3: # version >= 3, exever>="0.1.3.0"
(self.coppers,) = struct.unpack('<L', f.read(4))
if entrycount >= 6:
(data,) = struct.unpack('<L', f.read(4))
            self.multilap = bool(data)
(data,) = struct.unpack('<L', f.read(4))
            self.racetype = data if data in racetypes else -1
if entrycount >= 9:
(self.unknown,) = struct.unpack('<L', f.read(4))
if entrycount >= 10:
(self.authorscore,) = struct.unpack('<L', f.read(4))
if entrycount >= 11:
(self.unknown2,) = struct.unpack('<L', f.read(4))
# start of Strings block in version 2 (0x3A, TM)
# start of Version? block in versions >= 3
f.seek(4, os.SEEK_CUR)
# 00 03 00 00 (TM v2)
# 01 03 00 00 (TMPowerUp v3; TMO v4, exever="0.1.3.3-5"; TMS v4, exever="0.1.4.0")
# 02 03 00 00 (TMS v4, exever="0.1.4.1-6")
# 03 03 00 00 (TMO/TMS v4, exever="0.1.4.8", rare)
# 04 03 00 00 (TMO/TMS/TMN v4, exever>="0.1.4.8")
# 05 03 00 00 (TMU/TMF v5)
# start of Strings block in versions >= 3
# 0x4A (TMPowerUp v3)
# 0x5A (TMO/TMS v4, exever="0.1.3.3-0.1.4.1")
# 0x5E (TMS v4, exever="0.1.4.3-6")
# 0x62 (TMO/TMS/TMN v4, exever>="0.1.4.8")
# 0x6A (TMU/TMF v5, exever<="2.11.4")
# 0x6E (TMF v5, exever>="2.11.5")
f.seek(5, os.SEEK_CUR) # 00 and 00 00 00 80
self.uid = self.ReadGBXString()
f.seek(4, os.SEEK_CUR) # 00 00 00 40
self.envir = self.ReadGBXString()
f.seek(4, os.SEEK_CUR) # 00 00 00 [04|80]
self.author = self.ReadGBXString()
self.name = self.ReadGBXString()
f.seek(1, os.SEEK_CUR) # almost always 08
if self.version >= 3:
f.seek(4, os.SEEK_CUR) # varies... a lot
# password is optional, ReadGBXString might yell at us
try:
self.password = self.ReadGBXString()
except GBXReadException:
self.password = ""
if self.version >= 4 and entrycount >= 8: # exever>="0.1.4.1"
f.seek(4, os.SEEK_CUR) # 00 00 00 40
self.mood = self.ReadGBXString()
f.seek(4, os.SEEK_CUR) # 02 00 00 40
data = f.read(4) # 03 00 00 40 if no pub, otherwise 00 00 00 40
if data[0] != chr(3):
self.pub = self.ReadGBXString()
else:
self.pub = ''
# set pointer to start of next block based on actual offsets
lens = 0
for i in range(1, self.version+1):
lens += 8
if i <= 3:
lens += lengths[i]
f.seek(0x15 + lens, os.SEEK_SET)
# get optional XML block & wrap lines for readability
if self.version >= 4:
self.rawxml = self.ReadGBXString()
self.rawxml = self.rawxml.replace("><", ">\n<")
# get optional Thumbnail/Comments block
if self.version >= 5:
f.seek(4, os.SEEK_CUR) # 01 00 00 00
(data,) = struct.unpack('<L', f.read(4))
f.seek(15, os.SEEK_CUR) # '<Thumbnail.jpg>'
# check for thumbnail
            if 0 < data < 0x10000:
                # extract and keep the thumbnail image bytes
                self.thumbnail = f.read(data)
f.seek(0x10, os.SEEK_CUR) # '</Thumbnail.jpg>'
f.seek(10, os.SEEK_CUR) # '<Comments>'
try:
self.comment = self.ReadGBXString()
except GBXReadException:
self.comment = ""
f.seek(11, os.SEEK_CUR) # '</Comments>'
f.close()
# to make this pickle-able, deref file object (safe as closed before)
self.filehandle = None
# convert password to hex format
if self.password:
data = self.password
self.password = ""
for i in range(3, len(data)): # skip 3 bogus chars
self.password += '%02X' % ord(data[i])
if self.rawxml:
self.parsedxml = parseString(self.rawxml)
# extract some minor details from xml if available
myxml = self.parsedxml
if myxml.documentElement.tagName == "header":
# if we actually have a header (yes there are broken tracks out there)
                self.exever = myxml.documentElement.getAttribute('exever')
                self.xmlver = myxml.documentElement.getAttribute('version')
descs = myxml.documentElement.getElementsByTagName('desc')[0]
                self.nblaps = descs.getAttribute('nblaps')
if descs.hasAttribute('mod'):
self.modname = descs.getAttribute('mod')
else:
self.modname = ''
ident = myxml.documentElement.getElementsByTagName('ident')[0]
self.author = ident.getAttribute('author')
# skim through <deps> for songfile and modfile
deps = myxml.documentElement.getElementsByTagName('deps')[0]
for dep in deps.getElementsByTagName('dep'):
filename = dep.getAttribute('file')
if filename.find('\\Mod\\') > 0:
self.modfile = filename.split('\\Mod\\', 1)[1]
elif filename.find('ChallengeMusics\\') > 0:
self.songfile = filename.split('ChallengeMusics\\', 1)[1]
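
# A minimal usage sketch (hypothetical file name; every attribute read above
# is available on the instance, or None when the track doesn't carry it):
#
#     reader = GBXChallengeReader('Sample.Challenge.Gbx')
#     print '%s by %s (%s, %s)' % (reader.name, reader.author, reader.envir,
#                                  racetypes.get(reader.racetype, 'unknown'))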
|
|
#!/usr/bin/python
"""
fantastic Add-on
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import sys
import urllib
import string
import json
class JSUnfuck(object):
numbers = None
words = {
"(![]+[])": "false",
"([]+{})": "[object Object]",
"(!![]+[])": "true",
"([][[]]+[])": "undefined",
"(+{}+[])": "NaN",
"([![]]+[][[]])": "falseundefined",
"([][f+i+l+t+e+r]+[])": "function filter() { [native code] }",
"(!![]+[][f+i+l+t+e+r])": "truefunction filter() { [native code] }",
"(+![]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0function String() { [native code] }",
"(+![]+[![]]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0falsefunction String() { [native code] }",
"([]+[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +l+o+c+a+t+i+o+n)())": "https://123movies.to",
"([]+[])[f+o+n+t+c+o+l+o+r]()": '<font color="undefined"></font>',
"(+(+!![]+e+1+0+0+0)+[])": "Infinity",
"(+[![]]+[][f+i+l+t+e+r])": 'NaNfunction filter() { [native code] }',
'(+[![]]+[+(+!+[]+(!+[]+[])[3]+[1]+[0]+[0]+[0])])': 'NaNInfinity',
'([]+[])[i+t+a+l+i+c+s]()': '<i></i>',
'[[]][c+o+n+c+a+t]([[]])+[]': ',',
'([][f+i+l+l]+[])': 'function fill() { [native code]}',
'(!![]+[][f+i+l+l])': 'truefunction fill() { [native code]}',
'((+[])[c+o+n+s+t+r+u+c+t+o+r]+[])': 'function Number() {[native code]} _display:45:1',
'(+(+!+[]+[1]+e+[2]+[0])+[])': '1.1e+21',
'([]+[])[c+o+n+s+t+r+u+c+t+o+r][n+a+m+e]': 'S+t+r+i+n+g',
'([][e+n+t+r+i+e+s]()+[])': '[object Array Iterator]',
'([]+[])[l+i+n+k](")': '<a href="""></a>',
'(![]+[0])[i+t+a+l+i+c+s]()': '<i>false0</i>',
# dummy to force array dereference
'DUMMY1': '6p',
'DUMMY2': '2x',
'DUMMY3': '%3C',
'DUMMY4': '%5B',
'DUMMY5': '6q',
'DUMMY6': '4h',
}
uniqs = {
'[t+o+S+t+r+i+n+g]': 1,
'[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,
'[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,
'[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,
'[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,
}
def __init__(self, js):
self.js = js
def decode(self, replace_plus=True):
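        # Keep substituting known JSFuck fragments (words, numbers, array
        # dereferences, escape/unescape constructs) until the string reaches
        # a fixed point, then strip the leftover '+' concatenation noise.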
while True:
start_js = self.js
self.repl_words(self.words)
self.repl_numbers()
self.repl_arrays(self.words)
self.repl_uniqs(self.uniqs)
if start_js == self.js:
break
if replace_plus:
self.js = self.js.replace('+', '')
        self.js = re.sub(r'\[[A-Za-z]*\]', '', self.js)
        self.js = re.sub(r'\[(\d+)\]', r'\1', self.js)
return self.js
def repl_words(self, words):
while True:
start_js = self.js
for key, value in sorted(words.items(), key=lambda x: len(x[0]), reverse=True):
self.js = self.js.replace(key, value)
if self.js == start_js:
break
def repl_arrays(self, words):
for word in sorted(words.values(), key=lambda x: len(x), reverse=True):
for index in xrange(0, 100):
                try:
                    repl = word[index]
                    self.js = self.js.replace('%s[%d]' % (word, index), repl)
                except IndexError:
                    # word has no character at this index; larger indices fail too
                    break
def repl_numbers(self):
if self.numbers is None:
self.numbers = self.__gen_numbers()
while True:
start_js = self.js
for key, value in sorted(self.numbers.items(), key=lambda x: len(x[0]), reverse=True):
self.js = self.js.replace(key, value)
if self.js == start_js:
break
def repl_uniqs(self, uniqs):
for key, value in uniqs.iteritems():
if key in self.js:
if value == 1:
self.__handle_tostring()
elif value == 2:
self.__handle_escape(key)
elif value == 3:
self.__handle_unescape(key)
def __handle_tostring(self):
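        # JSFuck spells base conversion as N[toString](base); by this pass the
        # operands are plain digits, so rewrite each match via to_base() below.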
        for match in re.finditer(r'(\d+)\[t\+o\+S\+t\+r\+i\+n\+g\](\d+)', self.js):
repl = to_base(match.group(1), match.group(2))
self.js = self.js.replace(match.group(0), repl)
def __handle_escape(self, key):
while True:
start_js = self.js
offset = self.js.find(key) + len(key)
if self.js[offset] == '(' and self.js[offset + 2] == ')':
c = self.js[offset + 1]
self.js = self.js.replace('%s(%s)' % (key, c), urllib.quote(c))
if start_js == self.js:
break
def __handle_unescape(self, key):
start = 0
while True:
start_js = self.js
offset = self.js.find(key, start)
if offset == -1: break
offset += len(key)
expr = ''
extra = ''
last_c = self.js[offset - 1]
abort = False
for i, c in enumerate(self.js[offset:]):
extra += c
if c == ')':
break
elif (i > 0 and c == '(') or (c == '[' and last_c != '+'):
abort = True
break
elif c == '%' or c in string.hexdigits:
expr += c
last_c = c
if not abort:
self.js = self.js.replace(key + extra, urllib.unquote(expr))
if start_js == self.js:
break
else:
start = offset
def __gen_numbers(self):
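        # In JSFuck, +[] is 0 and !+[] / !![] both coerce to true (1), so
        # chains like '!+[]+!![]+!![]' sum to small integers; precompute a
        # lookup covering roughly 0-999 plus the bracketed index forms.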
n = {'!+[]+!![]+!![]+!![]+!![]+!![]+!![]+!![]+!![]': '9',
'!+[]+!![]+!![]+!![]+!![]': '5', '!+[]+!![]+!![]+!![]': '4',
'!+[]+!![]+!![]+!![]+!![]+!![]': '6', '!+[]+!![]': '2',
'!+[]+!![]+!![]': '3', '(+![]+([]+[]))': '0', '(+[]+[])': '0', '+[]':'0',
'(+!![]+[])': '1', '!+[]+!![]+!![]+!![]+!![]+!![]+!![]': '7',
'!+[]+!![]+!![]+!![]+!![]+!![]+!![]+!![]': '8', '+!![]': '1',
'[+[]]': '[0]', '!+[]+!+[]': '2', '[+!+[]]': '[1]', '(+20)': '20',
'[+!![]]': '[1]', '[+!+[]+[+[]]]': '[10]', '+(1+1)': '11'}
for i in xrange(2, 20):
key = '+!![]' * (i - 1)
key = '!+[]' + key
n['(' + key + ')'] = str(i)
key += '+[]'
n['(' + key + ')'] = str(i)
n['[' + key + ']'] = '[' + str(i) + ']'
for i in xrange(2, 10):
key = '!+[]+' * (i - 1) + '!+[]'
n['(' + key + ')'] = str(i)
n['[' + key + ']'] = '[' + str(i) + ']'
key = '!+[]' + '+!![]' * (i - 1)
n['[' + key + ']'] = '[' + str(i) + ']'
for i in xrange(0, 10):
key = '(+(+!+[]+[%d]))' % (i)
n[key] = str(i + 10)
key = '[+!+[]+[%s]]' % (i)
n[key] = '[' + str(i + 10) + ']'
for tens in xrange(2, 10):
for ones in xrange(0, 10):
key = '!+[]+' * (tens) + '[%d]' % (ones)
n['(' + key + ')'] = str(tens * 10 + ones)
n['[' + key + ']'] = '[' + str(tens * 10 + ones) + ']'
for hundreds in xrange(1, 10):
for tens in xrange(0, 10):
for ones in xrange(0, 10):
key = '+!+[]' * hundreds + '+[%d]+[%d]))' % (tens, ones)
if hundreds > 1: key = key[1:]
key = '(+(' + key
n[key] = str(hundreds * 100 + tens * 10 + ones)
return n
def to_base(n, base, digits="0123456789abcdefghijklmnopqrstuvwxyz"):
n, base = int(n), int(base)
if n < base:
return digits[n]
else:
return to_base(n // base, base, digits).lstrip(digits[0]) + digits[n % base]
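# Quick sanity checks (not exhaustive): to_base(255, 16) == 'ff' and
# to_base('7', '2') == '111'; string inputs are coerced by int() above.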
def cfunfuck(fuckedup):
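    # Presumably a Cloudflare IUAM-style arithmetic challenge: pull out the
    # seed assignment and the follow-up compound-assignment operations,
    # decode each JSFuck expression, exec the assignments, then eval the
    # accumulated variable.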
fuck = re.findall(r's,t,o,p,b,r,e,a,k,i,n,g,f,\s*(\w+=).*?:\+?\(?(.*?)\)?\}', fuckedup)
fucks = re.findall(r'(\w+)\.\w+([\+\-\*\/]=)\+?\(?(.*?)\)?;', fuckedup)
endunfuck = fuck[0][0].split('=')[0]
unfuck = JSUnfuck(fuck[0][1]).decode()
unfuck = re.sub(r'[\(\)]', '', unfuck)
unfuck = fuck[0][0]+unfuck
exec(unfuck)
for fucker in fucks:
unfucker = JSUnfuck(fucker[2]).decode()
unfucker = re.sub(r'[\(\)]', '', unfucker)
unfucker = fucker[0]+fucker[1]+unfucker
exec(unfucker)
return str(eval(endunfuck))
def main():
with open(sys.argv[1]) as f:
start_js = f.read()
print JSUnfuck(start_js).decode()
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta
import json
import logging
import mock
import shutil
import tempfile
from c7n import policy, manager
from c7n.config import Config
from c7n.provider import clouds
from c7n.exceptions import ResourceLimitExceeded, PolicyValidationError
from c7n.resources import aws, load_resources
from c7n.resources.aws import AWS
from c7n.resources.ec2 import EC2
from c7n.schema import generate, JsonSchemaValidator
from c7n.utils import dumps
from c7n.query import ConfigSource, TypeInfo
from c7n.version import version
from .common import BaseTest, event_data, Bag
class DummyResource(manager.ResourceManager):
def resources(self):
return [{"abc": 123}, {"def": 456}]
@property
def actions(self):
class _a(object):
def name(self):
return self.f.__name__
def __init__(self, f):
self.f = f
def process(self, resources):
return self.f(resources)
def p1(resources):
return [{"abc": 456}, {"def": 321}]
def p2(resources):
return resources
return [_a(p1), _a(p2)]
class PolicyMetaLint(BaseTest):
def setUp(self):
# we need to load all resources for the linting meta tests.
load_resources()
def test_policy_missing_provider_session(self):
self.assertRaises(
RuntimeError,
policy.get_session_factory,
'nosuchthing', Bag())
def test_policy_detail_spec_permissions(self):
policy = self.load_policy(
{"name": "kinesis-delete", "resource": "kinesis", "actions": ["delete"]}
)
perms = policy.get_permissions()
self.assertEqual(
perms,
set(
(
"kinesis:DescribeStream",
"kinesis:ListStreams",
"kinesis:DeleteStream",
)
),
)
def test_schema_plugin_name_mismatch(self):
# todo iterate over all clouds not just aws resources
for k, v in manager.resources.items():
for fname, f in v.filter_registry.items():
if fname in ("or", "and", "not"):
continue
self.assertIn(fname, f.schema["properties"]["type"]["enum"])
for aname, a in v.action_registry.items():
self.assertIn(aname, a.schema["properties"]["type"]["enum"])
def test_schema(self):
try:
schema = generate()
JsonSchemaValidator.check_schema(schema)
except Exception:
self.fail("Invalid schema")
def test_schema_serialization(self):
try:
dumps(generate())
except Exception:
self.fail("Failed to serialize schema")
def test_resource_augment_universal_mask(self):
        # universal tag had a potential bad pattern of masking
        # resource augmentation; scan resources to ensure none are affected
missing = []
for k, v in manager.resources.items():
if not getattr(v.resource_type, "universal_taggable", None):
continue
if (
v.augment.__name__ == "universal_augment" and
getattr(v.resource_type, "detail_spec", None)
):
missing.append(k)
if missing:
self.fail(
"%s resource has universal augment masking resource augment" % (
', '.join(missing))
)
def test_resource_universal_taggable_arn_type(self):
missing = []
for k, v in manager.resources.items():
if not getattr(v, 'augment', None):
continue
if (
v.augment.__name__ == "universal_augment" and
v.resource_type.arn_type is None
):
missing.append(k)
if missing:
self.fail("%s universal taggable resource missing arn_type" % (
', '.join(missing)))
def test_resource_shadow_source_augment(self):
shadowed = []
bad = []
cfg = Config.empty()
for k, v in manager.resources.items():
if not getattr(v.resource_type, "config_type", None):
continue
p = Bag({"name": "permcheck", "resource": k, 'provider_name': 'aws'})
ctx = self.get_context(config=cfg, policy=p)
mgr = v(ctx, p)
source = mgr.get_source("config")
if not isinstance(source, ConfigSource):
bad.append(k)
if v.__dict__.get("augment"):
shadowed.append(k)
if shadowed:
self.fail(
"%s have resource managers shadowing source augments"
% (", ".join(shadowed))
)
if bad:
self.fail("%s have config types but no config source" % (", ".join(bad)))
def test_resource_arn_override_generator(self):
overrides = set()
for k, v in manager.resources.items():
arn_gen = bool(v.__dict__.get('get_arns') or v.__dict__.get('generate_arn'))
if arn_gen:
overrides.add(k)
overrides = overrides.difference(set(
('account', 's3', 'hostedzone', 'log-group', 'rest-api', 'redshift-snapshot',
'rest-stage')))
if overrides:
raise ValueError("unknown arn overrides in %s" % (", ".join(overrides)))
def test_resource_name(self):
names = []
for k, v in manager.resources.items():
if not getattr(v.resource_type, "name", None):
names.append(k)
if names:
self.fail("%s dont have resource name for reporting" % (", ".join(names)))
def test_resource_meta_with_class(self):
missing = set()
for k, v in manager.resources.items():
if k in ('rest-account', 'account'):
continue
if not issubclass(v.resource_type, TypeInfo):
missing.add(k)
if missing:
raise SyntaxError("missing type info class %s" % (', '.join(missing)))
def test_resource_type_empty_metadata(self):
empty = set()
for k, v in manager.resources.items():
if k in ('rest-account', 'account'):
continue
for rk, rv in v.resource_type.__dict__.items():
if rk[0].isalnum() and rv is None:
empty.add(k)
if empty:
raise ValueError("Empty Resource Metadata %s" % (', '.join(empty)))
def test_resource_legacy_type(self):
legacy = set()
marker = object()
for k, v in manager.resources.items():
if getattr(v.resource_type, 'type', marker) is not marker:
legacy.add(k)
if legacy:
raise SyntaxError("legacy arn type info %s" % (', '.join(legacy)))
def _visit_filters_and_actions(self, visitor):
names = []
for cloud_name, cloud in clouds.items():
for resource_name, resource in cloud.resources.items():
for fname, f in resource.filter_registry.items():
if fname in ('and', 'or', 'not'):
continue
if visitor(f):
names.append("%s.%s.filters.%s" % (
cloud_name, resource_name, fname))
for aname, a in resource.action_registry.items():
if visitor(a):
names.append('%s.%s.actions.%s' % (
cloud_name, resource_name, aname))
return names
def test_filter_action_additional(self):
def visitor(e):
if e.type == 'notify':
return
return e.schema.get('additionalProperties', True) is True
names = self._visit_filters_and_actions(visitor)
if names:
self.fail(
"missing additionalProperties: False on actions/filters\n %s" % (
" \n".join(names)))
def test_filter_action_type(self):
def visitor(e):
return 'type' not in e.schema['properties']
names = self._visit_filters_and_actions(visitor)
if names:
self.fail("missing type on actions/filters\n %s" % (" \n".join(names)))
def test_resource_arn_info(self):
missing = []
whitelist_missing = set((
'rest-stage', 'rest-resource', 'rest-vpclink'))
explicit = []
whitelist_explicit = set((
'rest-account', 'shield-protection', 'shield-attack',
'dlm-policy', 'efs', 'efs-mount-target', 'gamelift-build',
'glue-connection', 'glue-dev-endpoint', 'cloudhsm-cluster',
'snowball-cluster', 'snowball', 'ssm-activation',
'healthcheck', 'event-rule-target',
'support-case', 'transit-attachment', 'config-recorder'))
missing_method = []
for k, v in manager.resources.items():
rtype = getattr(v, 'resource_type', None)
if not v.has_arn():
missing_method.append(k)
if rtype is None:
continue
if v.__dict__.get('get_arns'):
continue
if getattr(rtype, 'arn', None) is False:
explicit.append(k)
if getattr(rtype, 'arn', None) is not None:
continue
if getattr(rtype, 'type', None) is not None:
continue
if getattr(rtype, 'arn_type', None) is not None:
continue
missing.append(k)
self.assertEqual(
set(missing).union(explicit),
set(missing_method))
missing = set(missing).difference(whitelist_missing)
if missing:
self.fail(
"%d resources %s are missing arn type info" % (
len(missing), ", ".join(missing)))
explicit = set(explicit).difference(whitelist_explicit)
if explicit:
self.fail(
"%d resources %s dont have arn type info exempted" % (
len(explicit), ", ".join(explicit)))
def test_resource_permissions(self):
self.capture_logging("c7n.cache")
missing = []
cfg = Config.empty()
for k, v in list(manager.resources.items()):
p = Bag({"name": "permcheck", "resource": k, 'provider_name': 'aws'})
ctx = self.get_context(config=cfg, policy=p)
mgr = v(ctx, p)
perms = mgr.get_permissions()
if not perms:
missing.append(k)
for n, a in list(v.action_registry.items()):
p["actions"] = [n]
perms = a({}, mgr).get_permissions()
found = bool(perms)
if not isinstance(perms, (list, tuple, set)):
found = False
if "webhook" == n:
continue
if not found:
missing.append("%s.actions.%s" % (k, n))
for n, f in list(v.filter_registry.items()):
if n in ("and", "or", "not", "missing"):
continue
p["filters"] = [n]
perms = f({}, mgr).get_permissions()
if not isinstance(perms, (tuple, list, set)):
missing.append("%s.filters.%s" % (k, n))
# in memory filters
if n in (
"event",
"value",
"tag-count",
"marked-for-op",
"offhour",
"onhour",
"age",
"state-age",
"egress",
"ingress",
"capacity-delta",
"is-ssl",
"global-grants",
"missing-policy-statement",
"missing-statement",
"healthcheck-protocol-mismatch",
"image-age",
"has-statement",
"no-access",
"instance-age",
"ephemeral",
"instance-uptime",
):
continue
qk = "%s.filters.%s" % (k, n)
if qk in ("route-table.filters.route",):
continue
if not perms:
missing.append(qk)
if missing:
self.fail(
"Missing permissions %d on \n\t%s"
% (len(missing), "\n\t".join(sorted(missing)))
)
class PolicyMeta(BaseTest):
def test_policy_detail_spec_permissions(self):
policy = self.load_policy(
{"name": "kinesis-delete",
"resource": "kinesis",
"actions": ["delete"]}
)
perms = policy.get_permissions()
self.assertEqual(
perms,
set(
(
"kinesis:DescribeStream",
"kinesis:ListStreams",
"kinesis:DeleteStream",
)
),
)
def test_policy_manager_custom_permissions(self):
policy = self.load_policy(
{
"name": "ec2-utilization",
"resource": "ec2",
"filters": [
{
"type": "metrics",
"name": "CPUUtilization",
"days": 3,
"value": 1.5,
}
],
}
)
perms = policy.get_permissions()
self.assertEqual(
perms,
set(
(
"ec2:DescribeInstances",
"ec2:DescribeTags",
"cloudwatch:GetMetricStatistics",
)
),
)
class TestPolicyCollection(BaseTest):
def test_expand_partitions(self):
cfg = Config.empty(regions=["us-gov-west-1", "cn-north-1", "us-west-2"])
original = policy.PolicyCollection.from_data(
{"policies": [{"name": "foo", "resource": "ec2"}]}, cfg
)
collection = AWS().initialize_policies(original, cfg)
self.assertEqual(
sorted([p.options.region for p in collection]),
["cn-north-1", "us-gov-west-1", "us-west-2"],
)
def test_policy_expand_group_region(self):
cfg = Config.empty(regions=["us-east-1", "us-east-2", "us-west-2"])
original = policy.PolicyCollection.from_data(
{"policies": [
{"name": "bar", "resource": "lambda"},
{"name": "middle", "resource": "security-group"},
{"name": "foo", "resource": "ec2"}]},
cfg)
collection = AWS().initialize_policies(original, cfg)
self.assertEqual(
[(p.name, p.options.region) for p in collection],
[('bar', 'us-east-1'),
('middle', 'us-east-1'),
('foo', 'us-east-1'),
('bar', 'us-east-2'),
('middle', 'us-east-2'),
('foo', 'us-east-2'),
('bar', 'us-west-2'),
('middle', 'us-west-2'),
('foo', 'us-west-2')])
def test_policy_region_expand_global(self):
factory = self.replay_flight_data('test_aws_policy_global_expand')
self.patch(aws, '_profile_session', factory())
original = self.policy_loader.load_data(
{"policies": [
{"name": "foo", "resource": "s3"},
{"name": "iam", "resource": "iam-user"}]},
'memory://',
config=Config.empty(regions=["us-east-1", "us-west-2"]),
)
collection = AWS().initialize_policies(original, Config.empty(regions=["all"]))
self.assertEqual(len(collection.resource_types), 2)
s3_regions = [p.options.region for p in collection if p.resource_type == "s3"]
self.assertTrue("us-east-1" in s3_regions)
self.assertTrue("us-east-2" in s3_regions)
iam = [p for p in collection if p.resource_type == "iam-user"]
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, "us-east-1")
collection = AWS().initialize_policies(
original, Config.empty(regions=["eu-west-1", "eu-west-2"])
)
iam = [p for p in collection if p.resource_type == "iam-user"]
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, "eu-west-1")
self.assertEqual(len(collection), 3)
class TestPolicy(BaseTest):
def test_policy_variable_precedent(self):
p = self.load_policy({
'name': 'compute',
'resource': 'aws.ec2'},
config={'account_id': '00100100'})
v = p.get_variables({'account_id': 'foobar',
'charge_code': 'oink'})
self.assertEqual(v['account_id'], '00100100')
self.assertEqual(v['charge_code'], 'oink')
def test_policy_with_role_complete(self):
p = self.load_policy({
'name': 'compute',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'member-role': 'arn:aws:iam::{account_id}:role/BarFoo',
'role': 'arn:aws:iam::{account_id}:role/FooBar'},
'actions': [
{'type': 'tag',
'value': 'bad monkey {account_id} {region} {now:+2d%Y-%m-%d}'},
{'type': 'notify',
'to': ['me@example.com'],
'transport': {
'type': 'sns',
'topic': 'arn:::::',
},
'subject': "S3 - Cross-Account -[custodian {{ account }} - {{ region }}]"},
]}, config={'account_id': '12312311', 'region': 'zanzibar'})
p.expand_variables(p.get_variables())
self.assertEqual(p.data['mode']['role'], 'arn:aws:iam::12312311:role/FooBar')
def test_policy_variable_interpolation(self):
p = self.load_policy({
'name': 'compute',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'member-role': 'arn:aws:iam::{account_id}:role/BarFoo',
'role': 'FooBar'},
'actions': [
{'type': 'tag',
'value': 'bad monkey {account_id} {region} {now:+2d%Y-%m-%d}'},
{'type': 'notify',
'to': ['me@example.com'],
'transport': {
'type': 'sns',
'topic': 'arn:::::',
},
'subject': "S3 - Cross-Account -[custodian {{ account }} - {{ region }}]"},
]}, config={'account_id': '12312311', 'region': 'zanzibar'})
ivalue = 'bad monkey 12312311 zanzibar %s' % (
(datetime.utcnow() + timedelta(2)).strftime('%Y-%m-%d'))
p.expand_variables(p.get_variables())
self.assertEqual(p.data['actions'][0]['value'], ivalue)
self.assertEqual(
p.data['actions'][1]['subject'],
"S3 - Cross-Account -[custodian {{ account }} - {{ region }}]")
self.assertEqual(p.data['mode']['role'], 'arn:aws:iam::12312311:role/FooBar')
self.assertEqual(p.data['mode']['member-role'], 'arn:aws:iam::{account_id}:role/BarFoo')
self.assertEqual(p.resource_manager.actions[0].data['value'], ivalue)
def test_child_resource_trail_validation(self):
self.assertRaises(
ValueError,
self.load_policy,
{
"name": "api-resources",
"resource": "rest-resource",
"mode": {
"type": "cloudtrail",
"events": [
{
"source": "apigateway.amazonaws.com",
"event": "UpdateResource",
"ids": "requestParameter.stageName",
}
],
},
},
)
def test_load_policy_validation_error(self):
invalid_policies = {
"policies": [
{
"name": "foo",
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
"actions": [
{"type": "untag", "tags": {"custodian_cleanup": "yes"}}
],
}
]
}
self.assertRaises(Exception, self.load_policy_set, invalid_policies)
def test_policy_validation(self):
policy = self.load_policy(
{
"name": "ec2-utilization",
"resource": "ec2",
"tags": ["abc"],
"filters": [
{
"type": "metrics",
"name": "CPUUtilization",
"days": 3,
"value": 1.5,
}
],
"actions": ["stop"],
}
)
policy.validate()
self.assertEqual(policy.tags, ["abc"])
self.assertFalse(policy.is_lambda)
self.assertTrue(
repr(policy).startswith("<Policy resource:ec2 name:ec2-utilization")
)
def test_policy_name_and_resource_type_filtering(self):
collection = self.load_policy_set(
{
"policies": [
{"name": "s3-remediate", "resource": "s3"},
{"name": "s3-global-grants", "resource": "s3"},
{"name": "ec2-tag-compliance-stop", "resource": "ec2"},
{"name": "ec2-tag-compliance-kill", "resource": "ec2"},
{"name": "ec2-tag-compliance-remove", "resource": "ec2"},
]
}
)
self.assertIn("s3-remediate", collection)
self.assertNotIn("s3-argle-bargle", collection)
# Make sure __iter__ works
for p in collection:
self.assertTrue(p.name is not None)
self.assertEqual(collection.resource_types, set(("s3", "ec2")))
self.assertTrue("s3-remediate" in collection)
self.assertEqual(
[p.name for p in collection.filter(["s3*"])],
["s3-remediate", "s3-global-grants"],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*"])],
[
"ec2-tag-compliance-stop",
"ec2-tag-compliance-kill",
"ec2-tag-compliance-remove",
],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*", "s3*"])],
[p.name for p in collection],
)
self.assertEqual(
[p.name for p in collection.filter(resource_types=["ec2"])],
[
"ec2-tag-compliance-stop",
"ec2-tag-compliance-kill",
"ec2-tag-compliance-remove",
],
)
self.assertEqual(
[p.name for p in collection.filter(resource_types=["ec2", "s3"])],
[p.name for p in collection],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*", "s3*"], ["ec2", "s3"])],
[p.name for p in collection],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*", "s3*"], ["s3"])],
[
"s3-remediate",
"s3-global-grants",
],
)
self.assertEqual(
[p.name for p in collection.filter(["asdf12"])],
[],
)
self.assertEqual(
[p.name for p in collection.filter(resource_types=["asdf12"])],
[],
)
def test_file_not_found(self):
self.assertRaises(IOError, policy.load, Config.empty(), "/asdf12")
def test_lambda_policy_metrics(self):
session_factory = self.replay_flight_data("test_lambda_policy_metrics")
p = self.load_policy(
{
"name": "ec2-tag-compliance-v6",
"resource": "ec2",
"mode": {"type": "ec2-instance-state", "events": ["running"]},
"filters": [
{"tag:custodian_status": "absent"},
{
"or": [
{"tag:App": "absent"},
{"tag:Env": "absent"},
{"tag:Owner": "absent"},
]
},
],
},
session_factory=session_factory,
)
end = datetime.utcnow()
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.assertEqual(
json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
{
u"Durations": [],
u"Errors": [
{
u"Sum": 0.0,
u"Timestamp": u"2016-05-30T10:50:00+00:00",
u"Unit": u"Count",
}
],
u"Invocations": [
{
u"Sum": 4.0,
u"Timestamp": u"2016-05-30T10:50:00+00:00",
u"Unit": u"Count",
}
],
u"ResourceCount": [
{
u"Average": 1.0,
u"Sum": 2.0,
u"Timestamp": u"2016-05-30T10:50:00+00:00",
u"Unit": u"Count",
}
],
u"Throttles": [
{
u"Sum": 0.0,
u"Timestamp": u"2016-05-30T10:50:00+00:00",
u"Unit": u"Count",
}
],
},
)
def test_policy_resource_limits(self):
session_factory = self.replay_flight_data(
"test_policy_resource_limits")
p = self.load_policy(
{
"name": "log-delete",
"resource": "log-group",
"max-resources-percent": 2.5,
},
session_factory=session_factory)
p.ctx.metrics.flush = mock.MagicMock()
output = self.capture_logging('custodian.policy', level=logging.ERROR)
self.assertRaises(ResourceLimitExceeded, p.run)
self.assertEqual(
output.getvalue().strip(),
"policy:log-delete exceeded resource-limit:2.5% found:1 total:1")
self.assertEqual(
p.ctx.metrics.buf[0]['MetricName'], 'ResourceLimitExceeded')
def test_policy_resource_limits_count(self):
session_factory = self.replay_flight_data(
"test_policy_resource_count")
p = self.load_policy(
{
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": 1
},
session_factory=session_factory)
self.assertRaises(ResourceLimitExceeded, p.run)
policy = {
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": 0
}
config = Config.empty(validate=True)
self.assertRaises(
Exception,
self.load_policy,
policy,
config=config,
validate=True,
session_factory=session_factory
)
def test_policy_resource_limit_and_percent(self):
session_factory = self.replay_flight_data(
"test_policy_resource_count")
p = self.load_policy(
{
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": {
"amount": 1,
"percent": 10,
"op": "and"
}
},
session_factory=session_factory)
self.assertRaises(ResourceLimitExceeded, p.run)
p = self.load_policy(
{
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": {
"amount": 100,
"percent": 10,
"op": "and"
}
},
session_factory=session_factory)
resources = p.run()
self.assertTrue(resources)
def test_policy_resource_limits_with_filter(self):
session_factory = self.replay_flight_data(
"test_policy_resource_count_with_filter")
p = self.load_policy(
{
"name": "asg-with-image-age-resource-count",
"resource": "asg",
"max-resources": 1,
"filters": [{
"type": "image-age",
"op": "ge",
"days": 0
}]
},
session_factory=session_factory)
resources = p.run()
self.assertTrue(resources)
def test_policy_metrics(self):
session_factory = self.replay_flight_data("test_policy_metrics")
p = self.load_policy(
{
"name": "s3-encrypt-keys",
"resource": "s3",
"actions": [{"type": "encrypt-keys"}],
},
session_factory=session_factory,
)
end = datetime.now().replace(hour=0, minute=0, microsecond=0)
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.maxDiff = None
self.assertEqual(
json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
{
"ActionTime": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 8541.752702140668,
"Sum": 128126.29053211001,
"Unit": "Seconds",
}
],
"Total Keys": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 1575708.7333333334,
"Sum": 23635631.0,
"Unit": "Count",
}
],
"ResourceTime": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 8.682969363532667,
"Sum": 130.24454045299,
"Unit": "Seconds",
}
],
"ResourceCount": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 23.6,
"Sum": 354.0,
"Unit": "Count",
}
],
"Unencrypted": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 10942.266666666666,
"Sum": 164134.0,
"Unit": "Count",
}
],
},
)
def test_get_resource_manager(self):
collection = self.load_policy_set(
{
"policies": [
{
"name": "query-instances",
"resource": "ec2",
"filters": [{"tag-key": "CMDBEnvironment"}],
}
]
}
)
p = collection.policies[0]
self.assertTrue(isinstance(p.load_resource_manager(), EC2))
def test_get_logs_from_group(self):
p_data = {
"name": "related-rds-test",
"resource": "rds",
"filters": [
{"key": "GroupName", "type": "security-group", "value": "default"}
],
"actions": [{"days": 10, "type": "retention"}],
}
session_factory = self.replay_flight_data("test_logs_from_group")
config = {"log_group": "test-logs"}
policy = self.load_policy(p_data, config, session_factory)
logs = list(policy.get_logs("2016-11-01 00:00:00", "2016-11-30 11:59:59"))
self.assertEqual(len(logs), 6)
# entries look reasonable
entry = logs[1]
self.assertIn("timestamp", entry)
self.assertIn("message", entry)
# none in range
logs = list(policy.get_logs("2016-10-01 00:00:00", "2016-10-31 11:59:59"))
self.assertEqual(len(logs), 0)
def xtest_policy_run(self):
manager.resources.register("dummy", DummyResource)
self.addCleanup(manager.resources.unregister, "dummy")
self.output_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.output_dir)
collection = self.load_policy_set(
{"policies": [{"name": "process-instances", "resource": "dummy"}]},
{"output_dir": self.output_dir},
)
p = collection.policies[0]
p()
self.assertEqual(len(p.ctx.metrics.data), 3)
def test_validate_policy_start_stop(self):
data = {
'name': 'bad-str-parse',
'resource': 'ec2',
'start': 'asdf'
}
with self.assertRaises(ValueError):
self.load_policy(data)
data = {
'name': 'bad-non-str-parse',
'resource': 'ec2',
'start': 2
}
with self.assertRaises(Exception):
self.load_policy(data)
data = {
'name': 'bad-tz-parse',
'resource': 'ec2',
'tz': 'asdf'
}
with self.assertRaises(PolicyValidationError):
self.load_policy(data)
data = {
'name': 'bad-tz-int-parse',
'resource': 'ec2',
'tz': 2
}
with self.assertRaises(Exception):
self.load_policy(data)
data = {
'name': 'good-time-parse',
'resource': 'ec2',
'start': '4 AM'
}
p = self.load_policy(data)
result = p.validate_policy_start_stop()
self.assertEqual(result, None)
data = {
'name': 'good-tz-str-parse',
'resource': 'ec2',
'tz': 'UTC'
}
p = self.load_policy(data)
result = p.validate_policy_start_stop()
self.assertEqual(result, None)
class PolicyExecutionModeTest(BaseTest):
def test_run_unimplemented(self):
self.assertRaises(NotImplementedError, policy.PolicyExecutionMode({}).run)
def test_get_logs_unimplemented(self):
self.assertRaises(
NotImplementedError, policy.PolicyExecutionMode({}).get_logs, 1, 2
)
class LambdaModeTest(BaseTest):
def test_tags_validation(self):
log_file = self.capture_logging('c7n.policy', level=logging.INFO)
self.load_policy({
'name': 'foobar',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'tags': {
'custodian-mode': 'xyz',
'xyz': 'bar'}
}},
validate=True)
lines = log_file.getvalue().strip().split('\n')
self.assertEqual(
lines[0],
('Custodian reserves policy lambda tags starting with '
'custodian - policy specifies custodian-mode'))
def test_tags_injection(self):
p = self.load_policy({
'name': 'foobar',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'tags': {
'xyz': 'bar'}
}},
validate=True)
from c7n import mu
policy_lambda = []
def publish(self, func, alias=None, role=None, s3_uri=None):
policy_lambda.append(func)
self.patch(mu.LambdaManager, 'publish', publish)
p.provision()
self.assertEqual(
policy_lambda[0].tags['custodian-info'],
'mode=config-rule:version=%s' % version)
class PullModeTest(BaseTest):
def test_skip_when_region_not_equal(self):
log_file = self.capture_logging("custodian.policy")
policy_name = "rds-test-policy"
p = self.load_policy(
{
"name": policy_name,
"resource": "rds",
"region": "us-east-1",
"filters": [{"type": "default-vpc"}],
},
config={"region": "us-west-2"},
session_factory=None,
)
p.run()
lines = log_file.getvalue().strip().split("\n")
self.assertIn(
"Skipping policy:{} target-region:us-east-1 current-region:us-west-2".format(
policy_name
),
lines,
)
def test_is_runnable_mismatch_region(self):
p = self.load_policy(
{'name': 'region-mismatch',
'resource': 'ec2',
'region': 'us-east-1'},
config={'region': 'us-west-2', 'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), False)
def test_is_runnable_dates(self):
p = self.load_policy(
{'name': 'good-start-date',
'resource': 'ec2',
'tz': 'UTC',
'start': '2018-3-29'},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), True)
tomorrow_date = str(datetime.date(datetime.utcnow()) + timedelta(days=1))
p = self.load_policy(
{'name': 'bad-start-date',
'resource': 'ec2',
'tz': 'UTC',
'start': tomorrow_date},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), False)
p = self.load_policy(
{'name': 'good-end-date',
'resource': 'ec2',
'tz': 'UTC',
'end': tomorrow_date},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), True)
p = self.load_policy(
{'name': 'bad-end-date',
'resource': 'ec2',
'tz': 'UTC',
'end': '2018-3-29'},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), False)
p = self.load_policy(
{'name': 'bad-start-end-date',
'resource': 'ec2',
'tz': 'UTC',
'start': '2018-3-28',
'end': '2018-3-29'},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), False)
def test_is_runnable_parse_dates(self):
p = self.load_policy(
{'name': 'parse-date-policy',
'resource': 'ec2',
'tz': 'UTC',
'start': 'March 3 2018'},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), True)
p = self.load_policy(
{'name': 'parse-date-policy',
'resource': 'ec2',
'tz': 'UTC',
'start': 'March 3rd 2018'},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), True)
p = self.load_policy(
{'name': 'parse-date-policy',
'resource': 'ec2',
'tz': 'UTC',
'start': '28 March 2018'},
config={'validate': True},
session_factory=None)
pull_mode = policy.PullMode(p)
self.assertEqual(pull_mode.is_runnable(), True)
class PhdModeTest(BaseTest):
def test_validation(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{'name': 'xyz', 'resource': 'ec2',
'mode': {'type': 'phd'}})
self.load_policy(
{'name': 'abc', 'resource': 'account',
'mode': {'type': 'phd'}})
class GuardModeTest(BaseTest):
def test_unsupported_resource(self):
self.assertRaises(
ValueError,
self.load_policy,
{"name": "vpc", "resource": "vpc", "mode": {"type": "guard-duty"}},
validate=True,
)
def test_lambda_policy_validate_name(self):
name = "ec2-instance-guard-D8488F01-0E3E-4772-A3CB-E66EEBB9BDF4"
with self.assertRaises(PolicyValidationError) as e_cm:
self.load_policy(
{"name": name,
"resource": "ec2",
"mode": {"type": "guard-duty"}},
validate=True)
self.assertTrue("max length with prefix" in str(e_cm.exception))
@mock.patch("c7n.mu.LambdaManager.publish")
def test_ec2_guard_event_pattern(self, publish):
def assert_publish(policy_lambda, role):
events = policy_lambda.get_events(mock.MagicMock())
self.assertEqual(len(events), 1)
pattern = json.loads(events[0].render_event_pattern())
expected = {
"source": ["aws.guardduty"],
"detail": {"resource": {"resourceType": ["Instance"]}},
"detail-type": ["GuardDuty Finding"],
}
self.assertEqual(pattern, expected)
publish.side_effect = assert_publish
p = self.load_policy(
{
"name": "ec2-instance-guard",
"resource": "ec2",
"mode": {"type": "guard-duty"},
}
)
p.run()
@mock.patch("c7n.mu.LambdaManager.publish")
def test_iam_guard_event_pattern(self, publish):
def assert_publish(policy_lambda, role):
events = policy_lambda.get_events(mock.MagicMock())
self.assertEqual(len(events), 1)
pattern = json.loads(events[0].render_event_pattern())
expected = {
"source": ["aws.guardduty"],
"detail": {"resource": {"resourceType": ["AccessKey"]}},
"detail-type": ["GuardDuty Finding"],
}
self.assertEqual(pattern, expected)
publish.side_effect = assert_publish
p = self.load_policy(
{
"name": "iam-user-guard",
"resource": "iam-user",
"mode": {"type": "guard-duty"},
}
)
p.run()
@mock.patch("c7n.query.QueryResourceManager.get_resources")
def test_ec2_instance_guard(self, get_resources):
def instances(ids, cache=False):
return [{"InstanceId": ids[0]}]
get_resources.side_effect = instances
p = self.load_policy(
{
"name": "ec2-instance-guard",
"resource": "ec2",
"mode": {"type": "guard-duty"},
}
)
event = event_data("ec2-duty-event.json")
results = p.push(event, None)
self.assertEqual(results, [{"InstanceId": "i-99999999"}])
@mock.patch("c7n.query.QueryResourceManager.get_resources")
def test_iam_user_access_key_annotate(self, get_resources):
def users(ids, cache=False):
return [{"UserName": ids[0]}]
get_resources.side_effect = users
p = self.load_policy(
{
"name": "user-key-guard",
"resource": "iam-user",
"mode": {"type": "guard-duty"},
}
)
event = event_data("iam-duty-event.json")
results = p.push(event, None)
self.assertEqual(
results,
[
{
u"UserName": u"GeneratedFindingUserName",
u"c7n:AccessKeys": {u"AccessKeyId": u"GeneratedFindingAccessKeyId"},
}
],
)
|
|
#!/usr/bin/python
#
# TODO:
# - Currently can't specify the history file.
# - File "/home/todsah/Development/mcplayeredit/lib/icmd.py", line 97, in _help_getspecifics
# help_short, help_desc = doc[0], '\n '.join(doc[1:]) (EMPTY DOC STRING)
#
"""
ICmd is a wrapper library for easily creating interactive commandline programs.
You simply create a class that inherits from the ICmdBase class and create a
new ICmd instance with that class.
ICmd will automatically use readline if it is available, giving you a history
and tab-completion of commands. The ICmd class will get input from the user and
then run methods on the root class you passed to its constructor.
Example:
class ICmdTest(ICmdBase):
def load(self, fname):
self.fname = fname
print "Loading %s" % (self.fname)
def save(self):
fname = getattr(self, 'fname', None)
print "Saving %s" % (fname)
def test(self, required, optional='optional'):
logging.info("Test: required=%s, optional=%s" % (required, optional))
icmd = ICmd(ICmdTest)
icmd.run()
NOTES:
You can NOT use method decorators, as ICmd does introspection of the
methods in the derived class.
"""
import sys
import os
import inspect
import re
import logging
# Try to load the clusterfuck that is readline. THANKS GNU!
try:
# See if we can load PyReadline (an almost pure Python implementation of
# readline for windows)
import pyreadline as readline
except ImportError:
pass
try:
# Lets try the Unix readline version.
import readline
except ImportError:
pass
class ICmdBase(object):
"""
Base class for ICmd commandline classes. Inherit from this class to get
default commands in your commandline application.
"""
    def __init__(self, helptext_prefix='', helptext_suffix='', batch=False):
self.helptext_prefix = helptext_prefix
self.helptext_suffix = helptext_suffix
self.batch = batch
def help(self, command=None):
"""
Display help
Displays all available commands or specific help for COMMAND if given.
"""
if command:
# Display command-specific help
try:
func = getattr(self, command)
except AttributeError, e:
raise ICmdError(1, "No such command: '%s'. Type 'help [command]' for help." % (command))
if not command.startswith('_') and callable(func):
help = self._help_getspecifics(command)
self._output("%s: %s" % (command, help[0]))
self._output("Usage: %s\n" % (help[2]))
for line in help[1].splitlines():
self._output(" %s" % (line))
self._output('')
else:
# Display all available commands
self._output(self.helptext_prefix)
for cmd in dir(self):
if not cmd.startswith('_') and callable(getattr(self, cmd)):
help = self._help_getspecifics(cmd)
self._output(' %10s: %s' % (cmd, help[0]))
self._output(self.helptext_suffix)
def _help_getspecifics(self, command):
help_short = ''
help_desc = ''
help_usage = ''
# Get short and full descriptions from the function's docstring.
func = getattr(self, command)
if func.__doc__:
            for line in func.__doc__.strip().splitlines():
                stripped = line.strip()
                if stripped.lower().startswith('usage:'):
                    help_usage = stripped[6:].strip()
elif not help_short:
help_short = line.strip()
else:
help_desc += "%s\n" % (line.strip())
# Get usage from the parameters
if not help_usage:
            args = inspect.getargspec(func)
            parcnt_min = len(args.args) - 1 - len(args.defaults or '')
help_usage = command
for i in range(1, len(args.args)):
if i <= parcnt_min:
help_usage += " <%s>" % (args.args[i])
else:
help_usage += " [%s]" % (args.args[i])
return([help_short.strip(), help_desc.strip(), help_usage.strip()])
def quit(self):
"""
Exit the program.
Exit the program. Does not save any changes!
"""
raise SystemExit()
    exit = quit
def _output(self, line):
if not self.batch:
sys.stdout.write(line + '\n')
def _error(self, line):
sys.stderr.write(line + '\n')
class ICmdError(Exception):
pass
class ICmd(object):
"""
Interactive/Batch Commandline interface. Given a class that overloads the
ICmdBase class, provide an interactive commandline to control that class.
"""
    def __init__(self, rootclass, prompt='> ',
                 histfile=os.path.join(os.environ.get('HOME', ''), '.icmd_hist'),
                 welcometext='Type \'help\' for help.',
                 helptext_prefix='The following commands are available:\n',
                 helptext_suffix='\n(type \'help <command>\' for details)\n',
                 batch=False):
"""
Create a new interactive commandline interface to rootclass by creating
an instance of rootclass (your class must derive from ICmdBase). Use
ICmd.run() or run_once() to start the commandline client. `batch`
        indicates whether to run in batch mode. If so, ICmd is silent (except
for errors) and non-interactive; instead reading from stdin and
executing each line as a command. It will exit after no more lines are
available.
"""
self.rootclass = rootclass
self.prompt = prompt
self.welcometext = welcometext
self.batch = batch
self.histfile = histfile
self.instclass = self.rootclass(helptext_prefix, helptext_suffix, self.batch)
        # Initialize readline, but only if we were able to load the module.
if 'readline' in sys.modules or 'pyreadline' in sys.modules:
logging.info("Using readline")
try:
readline.read_history_file(self.histfile)
except IOError:
pass
logging.info("Setting readline completer")
readline.set_completer(self._completer)
readline.parse_and_bind("tab: complete")
if not self.batch:
sys.stdout.write(welcometext + '\n')
def dispatch(self, cmd, params=[]):
"""
Run `cmd` on the rootclass. `cmd` must be an existing callable in
rootclass. Raises ICmdErrors in case of problems with the command (no
such command, too many/few parameters).
"""
logging.info("Dispatching %s %s" % (cmd, str(params)))
try:
func = getattr(self.instclass, cmd)
getattr(func, '__call__') # Test callability
except AttributeError, e:
raise ICmdError(1, "No such command: '%s'. Type 'help [command]' for help." % (cmd))
# Introspect how many arguments the function takes and how many the
# user gave.
args = inspect.getargspec(func)
parcnt_given = len(params)
parcnt_max = len(args.args) - 1
parcnt_min = len(args.args) - 1 - len(args.defaults or '')
logging.info("dispatch: params: given: %i, min: %i, max: %i" % (parcnt_given, parcnt_min, parcnt_max))
if parcnt_given < parcnt_min:
raise ICmdError(2, 'Not enough parameters given')
elif not args.varargs and parcnt_given > parcnt_max:
raise ICmdError(3, 'Too many parameters given')
return(func(*params))
def _completer(self, text, state):
"""
Readline completer. Scans the Command object instance for member
functions that match. Returns the next possible completion requested
(state).
"""
logging.info("Completing '%s' '%s'" % (text, state))
w = [cmd for cmd in dir(self.instclass) if cmd.startswith(text) and not cmd.startswith('_') and callable(getattr(self.instclass, cmd))]
try:
return(w[state])
except IndexError:
return None
def run_once(self, catcherrors=True):
"""
Ask the user for a single line of input and run that command. Returns
the returned result of the command callable (i.e. the return value of
the function in rootclass). Multiple commands may be given by
delimiting them with a semi-colon. In this case, a list of outputs is
returned.
"""
inputline = raw_input(self.prompt)
output = []
if inputline:
cmdlines = inputline.split(';') # Allow for multiple commands on one line, delimited with ';'
for cmdline in cmdlines:
parts = cmdline.split()
cmd = parts.pop(0)
params = parts
output.append(self.dispatch(cmd, params))
            # Backwards compatible: if multiple commands were given (delimited
            # with ';'), return a list of return values from each call.
            # Otherwise, return just the single return value.
if len(output) == 1:
return(output[0])
else:
return(output)
else:
return(False)
def run(self, catcherrors=True):
"""
Continually ask the user for lines of input and run those commands.
Catches all ICmdErrors and displays those errors. Catches
KeyboardInterrupt and SystemExit exceptions in order to clean up
        readline. Returns True if the user quit the application by typing
'quit' or 'exit'. Doesn't return anything (None) otherwise.
"""
if self.batch:
self.prompt = ''
try:
while True:
if catcherrors:
try:
self.run_once()
except ICmdError, e:
sys.stderr.write('%s\n' % (e.args[1]))
logging.info("ICmd.run intercepted an error: %s" % (e))
except EOFError, e:
break
else:
self.run_once()
except (SystemExit, KeyboardInterrupt):
if 'readline' in sys.modules or 'pyreadline' in sys.modules:
logging.info("Writing readline command history")
readline.write_history_file(self.histfile)
return(True)
if __name__ == "__main__":
class ICmdTest(ICmdBase):
def load(self, fname):
"""
Load a file
Load the file indicated by FNAME.
"""
self.fname = fname
print "Loading %s" % (self.fname)
def save(self):
"""
Save loaded file
            Save changes made to the file loaded with the 'load' command.
"""
fname = getattr(self, 'fname', None)
print "Saving %s" % (fname)
def test(self, required, optional='optional'):
"""
Parameter tests
Some parameter tests with a non-optional and optional parameter.
Also a two-line help description.
"""
logging.info("Test: required=%s, optional=%s" % (required, optional))
#logging.getLogger().level = logging.INFO
icmd = ICmd(ICmdTest, batch=False)
icmd.run()
#icmd.dispatch('load', ['foo'])
#icmd.dispatch('save')
|
|
# -*- coding: utf-8 -*-
from sys import hexversion
import random
from .context import sortedcontainers
from sortedcontainers import SortedListWithKey
from itertools import chain
from nose.tools import raises
if hexversion < 0x03000000:
from itertools import izip as zip
range = xrange
def negate(val):
return -val
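# Sorting by negate() orders values descending, which is why the assertions
# below compare against reversed() sequences.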
def test_identity():
slt = SortedListWithKey(range(100), load=7)
slt._check()
def test_init():
slt = SortedListWithKey(key=negate)
slt._check()
slt = SortedListWithKey(load=10000, key=negate)
assert slt._load == 10000
assert slt._twice == 20000
assert slt._half == 5000
slt._check()
slt = SortedListWithKey(range(10000), key=negate)
assert all(tup[0] == tup[1] for tup in zip(slt, reversed(range(10000))))
slt.clear()
assert slt._len == 0
assert slt._maxes == []
assert slt._lists == []
slt._check()
def test_key():
slt = SortedListWithKey(range(10000), key=lambda val: val % 10)
slt._check()
values = sorted(range(10000), key=lambda val: (val % 10, val))
assert slt == values
assert all(val in slt for val in range(10000))
def test_add():
random.seed(0)
slt = SortedListWithKey(key=negate)
for val in range(1000):
slt.add(val)
slt._check()
slt = SortedListWithKey(key=negate)
for val in range(1000, 0, -1):
slt.add(val)
slt._check()
slt = SortedListWithKey(key=negate)
for val in range(1000):
slt.add(random.random())
slt._check()
def test_update():
slt = SortedListWithKey(key=negate)
slt.update(range(1000))
assert len(slt) == 1000
slt._check()
slt.update(range(100))
assert len(slt) == 1100
slt._check()
slt.update(range(10000))
assert len(slt) == 11100
slt._check()
values = sorted((val for val in chain(range(100), range(1000), range(10000))), key=negate)
assert all(tup[0] == tup[1] for tup in zip(slt, values))
def test_contains():
slt = SortedListWithKey(key=negate)
assert 0 not in slt
slt.update(range(10000))
for val in range(10000):
assert val in slt
assert 10000 not in slt
assert -1 not in slt
slt._check()
def test_discard():
slt = SortedListWithKey(key=negate)
    assert slt.discard(0) is None
assert len(slt) == 0
slt._check()
slt = SortedListWithKey([1, 2, 2, 2, 3, 3, 5], load=4, key=negate)
slt.discard(6)
slt._check()
slt.discard(4)
slt._check()
slt.discard(2)
slt._check()
assert all(tup[0] == tup[1] for tup in zip(slt, reversed([1, 2, 2, 3, 3, 5])))
def test_remove():
slt = SortedListWithKey(key=negate)
    assert slt.discard(0) is None
assert len(slt) == 0
slt._check()
slt = SortedListWithKey([1, 2, 2, 2, 3, 3, 5], load=4, key=negate)
slt.remove(2)
slt._check()
assert all(tup[0] == tup[1] for tup in zip(slt, reversed([1, 2, 2, 3, 3, 5])))
@raises(ValueError)
def test_remove_valueerror1():
slt = SortedListWithKey(key=negate)
slt.remove(0)
@raises(ValueError)
def test_remove_valueerror2():
slt = SortedListWithKey(range(100), load=10, key=negate)
slt.remove(100)
@raises(ValueError)
def test_remove_valueerror3():
slt = SortedListWithKey([1, 2, 2, 2, 3, 3, 5], key=negate)
slt.remove(4)
def test_delete():
slt = SortedListWithKey(range(20), load=4, key=negate)
slt._check()
for val in range(20):
slt.remove(val)
slt._check()
assert len(slt) == 0
assert slt._maxes == []
assert slt._lists == []
def test_getitem():
random.seed(0)
slt = SortedListWithKey(load=17, key=negate)
slt.append(5)
assert slt[0] == 5
slt.clear()
lst = list()
for rpt in range(100):
val = random.random()
slt.add(val)
lst.append(val)
lst.sort(reverse=True)
assert all(slt[idx] == lst[idx] for idx in range(100))
assert all(slt[idx - 99] == lst[idx - 99] for idx in range(100))
def test_getitem_slice():
random.seed(0)
slt = SortedListWithKey(load=17, key=negate)
lst = list()
for rpt in range(100):
val = random.random()
slt.add(val)
lst.append(val)
lst.sort(reverse=True)
assert all(slt[start:] == lst[start:]
for start in [-75, -25, 0, 25, 75])
assert all(slt[:stop] == lst[:stop]
for stop in [-75, -25, 0, 25, 75])
assert all(slt[::step] == lst[::step]
for step in [-5, -1, 1, 5])
assert all(slt[start:stop] == lst[start:stop]
for start in [-75, -25, 0, 25, 75]
for stop in [-75, -25, 0, 25, 75])
assert all(slt[:stop:step] == lst[:stop:step]
for stop in [-75, -25, 0, 25, 75]
for step in [-5, -1, 1, 5])
assert all(slt[start::step] == lst[start::step]
for start in [-75, -25, 0, 25, 75]
for step in [-5, -1, 1, 5])
assert all(slt[start:stop:step] == lst[start:stop:step]
for start in [-75, -25, 0, 25, 75]
for stop in [-75, -25, 0, 25, 75]
for step in [-5, -1, 1, 5])
def test_getitem_slice_big():
slt = SortedListWithKey(range(4), key=negate)
lst = list(reversed(range(4)))
itr = ((start, stop, step)
for start in [-6, -4, -2, 0, 2, 4, 6]
for stop in [-6, -4, -2, 0, 2, 4, 6]
for step in [-3, -2, -1, 1, 2, 3])
for start, stop, step in itr:
assert slt[start:stop:step] == lst[start:stop:step]
@raises(ValueError)
def test_getitem_slicezero():
slt = SortedListWithKey(range(100), load=17, key=negate)
slt[::0]
@raises(IndexError)
def test_getitem_indexerror1():
slt = SortedListWithKey(key=negate)
slt[5]
@raises(IndexError)
def test_getitem_indexerror2():
slt = SortedListWithKey(range(100), key=negate)
slt[200]
@raises(IndexError)
def test_getitem_indexerror3():
slt = SortedListWithKey(range(100), key=negate)
slt[-101]
def test_delitem():
random.seed(0)
slt = SortedListWithKey(range(100), load=17, key=negate)
while len(slt) > 0:
del slt[random.randrange(len(slt))]
slt._check()
def test_delitem_slice():
slt = SortedListWithKey(range(100), load=17, key=negate)
del slt[10:40:1]
del slt[10:40:-1]
del slt[10:40:2]
del slt[10:40:-2]
def test_setitem():
random.seed(0)
slt = SortedListWithKey(range(0, 100, 10), load=4, key=negate)
slt[-3] = 20
slt._check()
values = list(enumerate(range(95, 5, -10)))
random.shuffle(values)
for pos, val in values:
slt[pos] = val
def test_setitem_slice():
slt = SortedListWithKey(range(100), load=17, key=negate)
slt[:10] = iter(range(99, 89, -1))
assert slt == list(range(99, -1, -1))
slt[:10:2] = iter([99, 97, 95, 93, 91])
assert slt == list(range(99, -1, -1))
slt[-50:] = range(49, -51, -1)
assert slt == list(range(99, -51, -1))
slt[-100:] = range(49, -1, -1)
assert slt == list(range(99, -1, -1))
slt[:] = range(99, -1, -1)
assert slt == list(range(99, -1, -1))
slt[90:] = []
assert slt == list(range(99, 9, -1))
slt[:10] = []
assert slt == list(range(89, 9, -1))
@raises(ValueError)
def test_setitem_slice_bad():
slt = SortedListWithKey(range(100), load=17, key=negate)
slt[:10] = list(reversed(range(10)))
@raises(ValueError)
def test_setitem_slice_bad1():
slt = SortedListWithKey(range(100), load=17, key=negate)
slt[10:20] = range(20, 30)
@raises(ValueError)
def test_setitem_slice_bad2():
slt = SortedListWithKey(range(100), load=17, key=negate)
slt[20:30] = range(10, 20)
def test_setitem_extended_slice():
slt = SortedListWithKey(range(1000, 0, -10), load=17, key=negate)
lst = list(range(1000, 0, -10))
lst[10:90:10] = range(905, 105, -100)
slt[10:90:10] = range(905, 105, -100)
assert slt == lst
@raises(ValueError)
def test_setitem_extended_slice_bad1():
slt = SortedListWithKey(range(100), load=17, key=negate)
slt[20:80:3] = list(range(10))
@raises(ValueError)
def test_setitem_extended_slice_bad2():
slt = SortedListWithKey(range(100), load=17, key=negate)
slt[40:90:5] = list(range(10))
@raises(ValueError)
def test_setitem_valueerror1():
slt = SortedListWithKey(range(10), key=negate)
slt[9] = 10
@raises(ValueError)
def test_setitem_valueerror2():
slt = SortedListWithKey(range(10), key=negate)
slt[0] = 0
def test_iter():
slt = SortedListWithKey(range(10000), key=negate)
itr = iter(slt)
assert all(tup[0] == tup[1] for tup in zip(range(9999, -1, -1), itr))
def test_reversed():
slt = SortedListWithKey(range(10000), key=negate)
rev = reversed(slt)
assert all(tup[0] == tup[1] for tup in zip(range(10000), rev))
def test_islice():
slt = SortedListWithKey(load=7, key=negate)
assert [] == list(slt.islice())
values = sorted(range(53), key=negate)
slt.update(values)
for start in range(53):
for stop in range(53):
assert list(slt.islice(start, stop)) == values[start:stop]
for start in range(53):
for stop in range(53):
assert list(slt.islice(start, stop, reverse=True)) == values[start:stop][::-1]
for start in range(53):
assert list(slt.islice(start=start)) == values[start:]
assert list(slt.islice(start=start, reverse=True)) == values[start:][::-1]
for stop in range(53):
assert list(slt.islice(stop=stop)) == values[:stop]
assert list(slt.islice(stop=stop, reverse=True)) == values[:stop][::-1]
def test_irange():
slt = SortedListWithKey(load=7, key=negate)
assert [] == list(slt.irange())
values = list(range(53))
slt.update(values)
for start in range(53):
for end in range(start, 53):
assert list(slt.irange(end, start)) == values[start:(end + 1)][::-1]
assert list(slt.irange(end, start, reverse=True)) == values[start:(end + 1)]
for start in range(53):
for end in range(start, 53):
assert list(slt.irange(end, start, (True, False))) == values[(start + 1):(end + 1)][::-1]
for start in range(53):
for end in range(start, 53):
assert list(slt.irange(end, start, (False, True))) == values[start:end][::-1]
for start in range(53):
for end in range(start, 53):
assert list(slt.irange(end, start, (False, False))) == values[(start + 1):end][::-1]
for start in range(53):
assert list(slt.irange(start)) == values[:(start + 1)][::-1]
for end in range(53):
assert list(slt.irange(None, end, (True, False))) == values[(end + 1):][::-1]
assert list(slt.irange(inclusive=(False, False))) == values[::-1]
assert list(slt.irange(-1)) == []
assert list(slt.irange(None, -1, (True, False))) == values[::-1]
def test_len():
slt = SortedListWithKey(key=negate)
for val in range(10000):
slt.add(val)
assert len(slt) == (val + 1)
def test_bisect_left():
slt = SortedListWithKey(key=negate)
assert slt.bisect_left(0) == 0
slt = SortedListWithKey(range(100), load=17, key=negate)
slt.update(range(100))
slt._check()
assert slt.bisect_left(50) == 98
assert slt.bisect_left(0) == 198
assert slt.bisect_left(-1) == 200
def test_bisect():
slt = SortedListWithKey(key=negate)
assert slt.bisect(10) == 0
slt = SortedListWithKey(range(100), load=17, key=negate)
slt.update(range(100))
slt._check()
assert slt.bisect(10) == 180
assert slt.bisect(0) == 200
def test_bisect_right():
slt = SortedListWithKey(key=negate)
assert slt.bisect_right(10) == 0
slt = SortedListWithKey(range(100), load=17, key=negate)
slt.update(range(100))
slt._check()
assert slt.bisect_right(10) == 180
assert slt.bisect_right(0) == 200
def test_copy():
slt = SortedListWithKey(range(100), load=7, key=negate)
two = slt.copy()
slt.add(100)
assert len(slt) == 101
assert len(two) == 100
def test_copy_copy():
import copy
slt = SortedListWithKey(range(100), load=7, key=negate)
two = copy.copy(slt)
slt.add(100)
assert len(slt) == 101
assert len(two) == 100
def test_count():
slt = SortedListWithKey(load=7, key=negate)
assert slt.count(0) == 0
for iii in range(100):
for jjj in range(iii):
slt.add(iii)
slt._check()
for iii in range(100):
assert slt.count(iii) == iii
def test_append():
slt = SortedListWithKey(load=17, key=negate)
slt.append(1000)
for val in range(999, -1, -1):
slt.append(val)
slt._check()
@raises(ValueError)
def test_append_valueerror():
slt = SortedListWithKey(range(100), key=negate)
slt.append(5)
def test_extend():
slt = SortedListWithKey(load=17, key=negate)
slt.extend(range(300, 200, -1))
slt._check()
slt.extend(list(range(200, 100, -1)))
slt._check()
for val in range(100, 0, -1):
del slt._index[:]
slt._build_index()
slt.extend([val] * (101 - val))
slt._check()
@raises(ValueError)
def test_extend_valueerror1():
slt = SortedListWithKey(key=negate)
slt.extend([1, 2, 3, 5, 4, 6])
@raises(ValueError)
def test_extend_valueerror2():
slt = SortedListWithKey(range(20), load=4, key=negate)
slt.extend([5, 4, 3, 2, 1])
def test_insert():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.insert(-1, 0)
slt._check()
slt.insert(-100, 9)
slt._check()
slt.insert(0, 10)
slt._check()
slt.insert(14, -1)
slt._check()
slt = SortedListWithKey(load=4, key=negate)
slt.insert(0, 5)
slt._check()
slt = SortedListWithKey(range(5, 15), load=4, key=negate)
for rpt in range(8):
slt.insert(0, 15)
slt._check()
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.insert(8, 2)
slt._check()
@raises(ValueError)
def test_insert_valueerror1():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.insert(10, 5)
@raises(ValueError)
def test_insert_valueerror2():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.insert(0, 0)
@raises(ValueError)
def test_insert_valueerror3():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.insert(5, 3)
@raises(ValueError)
def test_insert_valueerror4():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.insert(5, 7)
def test_pop():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt._check()
assert slt.pop() == 0
slt._check()
assert slt.pop(0) == 9
slt._check()
assert slt.pop(-2) == 2
slt._check()
assert slt.pop(4) == 4
slt._check()
@raises(IndexError)
def test_pop_indexerror1():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.pop(-11)
@raises(IndexError)
def test_pop_indexerror2():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.pop(10)
def test_index():
slt = SortedListWithKey(range(100), load=17, key=negate)
for pos, val in enumerate(range(99, -1, -1)):
assert val == slt.index(pos)
assert slt.index(99, 0, 1000) == 0
slt = SortedListWithKey((0 for rpt in range(100)), load=17, key=negate)
for start in range(100):
for stop in range(start, 100):
assert slt.index(0, start, stop + 1) == start
for start in range(100):
assert slt.index(0, -(100 - start)) == start
assert slt.index(0, -1000) == 0
@raises(ValueError)
def test_index_valueerror1():
slt = SortedListWithKey([0] * 10, load=4, key=negate)
slt.index(0, 10)
@raises(ValueError)
def test_index_valueerror2():
slt = SortedListWithKey([0] * 10, load=4, key=negate)
slt.index(0, 0, -10)
@raises(ValueError)
def test_index_valueerror3():
slt = SortedListWithKey([0] * 10, load=4, key=negate)
slt.index(0, 7, 3)
@raises(ValueError)
def test_index_valueerror4():
slt = SortedListWithKey([0] * 10, load=4, key=negate)
slt.index(1)
@raises(ValueError)
def test_index_valueerror5():
slt = SortedListWithKey(key=negate)
slt.index(1)
@raises(ValueError)
def test_index_valueerror6():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt.index(6, 5)
def test_mul():
this = SortedListWithKey(range(10), load=4, key=negate)
that = this * 5
this._check()
that._check()
assert this == list(reversed(range(10)))
assert that == list(sorted(list(range(10)) * 5, reverse=True))
assert this != that
def test_imul():
this = SortedListWithKey(range(10), load=4, key=negate)
this *= 5
this._check()
assert this == sorted(list(range(10)) * 5, reverse=True)
def test_op_add():
this = SortedListWithKey(range(10), load=4, key=negate)
assert (this + this + this) == (this * 3)
that = SortedListWithKey(range(10), load=4, key=negate)
that += that
that += that
assert that == (this * 4)
def test_eq():
this = SortedListWithKey(range(10), load=4, key=negate)
that = SortedListWithKey(range(20), load=4, key=negate)
assert not (this == that)
that.clear()
that.update(range(10))
assert this == that
def test_lt():
this = SortedListWithKey(range(10), load=4, key=negate)
that = SortedListWithKey(range(10, 20), load=5, key=negate)
assert this < that
assert not (that < this)
that = SortedListWithKey(range(1, 20), load=6, key=negate)
assert this < that
that = SortedListWithKey(range(1, 10), load=4, key=negate)
assert not (this < that)
def test_lte():
this = SortedListWithKey(range(10), load=4, key=negate)
that = SortedListWithKey(range(10), load=5, key=negate)
assert this <= that
assert that <= this
del this[-1]
assert this <= that
assert not (that <= this)
def test_gt():
this = SortedListWithKey(range(10), load=4, key=negate)
that = SortedListWithKey(range(10, 20), load=5, key=negate)
assert that > this
assert not (this > that)
that = SortedListWithKey(range(1, 20), load=6, key=negate)
assert that > this
that = SortedListWithKey(range(1, 10), load=4, key=negate)
assert not (that > this)
def test_gte():
this = SortedListWithKey(range(10), load=4, key=negate)
that = SortedListWithKey(range(10), load=5, key=negate)
assert this >= that
assert that >= this
del this[-1]
assert that >= this
assert not (this >= that)
def test_repr():
this = SortedListWithKey(range(10), load=4, key=negate)
assert repr(this).startswith('SortedListWithKey([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], key=<function negate at ')
def test_pickle():
import pickle
alpha = SortedListWithKey(range(10000), key=negate, load=500)
beta = pickle.loads(pickle.dumps(alpha))
assert alpha == beta
assert alpha._key == beta._key
assert alpha._load == beta._load
@raises(AssertionError)
def test_check():
slt = SortedListWithKey(range(10), load=4, key=negate)
slt._len = 5
slt._check()
if __name__ == '__main__':
import nose
nose.main()
|
|
##########################################################################
#
# Copyright (c) 2013-2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferOSL
import imath
import functools
_channelNamesOptions = {
"RGB" : IECore.Color3fData( imath.Color3f( 1 ) ),
"RGBA" : IECore.Color4fData( imath.Color4f( 1 ) ),
"R" : IECore.FloatData( 1 ),
"G" : IECore.FloatData( 1 ),
"B" : IECore.FloatData( 1 ),
"A" : IECore.FloatData( 1 ),
"customChannel" : IECore.FloatData( 1 ),
"customLayer" : IECore.Color3fData( imath.Color3f( 1 ) ),
"customLayerRGBA" : IECore.Color4fData( imath.Color4f( 1 ) ),
"closure" : None,
}
##########################################################################
# _ChannelsFooter
##########################################################################
class _ChannelsFooter( GafferUI.PlugValueWidget ) :
def __init__( self, plug ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
GafferUI.PlugValueWidget.__init__( self, row, plug )
with row :
GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )
menuButton = GafferUI.MenuButton(
image = "plus.png",
hasFrame = False,
menu = GafferUI.Menu(
Gaffer.WeakMethod( self.__menuDefinition ),
title = "Add Input"
),
toolTip = "Add Input"
)
menuButton.setEnabled( not Gaffer.MetadataAlgo.readOnly( plug ) )
GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )
def _updateFromPlug( self ) :
self.setEnabled( self._editable() )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
usedNames = set()
for p in self.getPlug().children():
# TODO - this method for checking if a plug variesWithContext should probably live in PlugAlgo
# ( it's based on Switch::variesWithContext )
sourcePlug = p["name"].source()
variesWithContext = sourcePlug.direction() == Gaffer.Plug.Direction.Out and isinstance( sourcePlug.node(), Gaffer.ComputeNode )
if not variesWithContext:
usedNames.add( p["name"].getValue() )
# Use a fixed order for some standard options that we want to list in a specific order
sortedOptions = []
for label in ["RGB", "RGBA", "R", "G", "B", "A" ]:
sortedOptions.append( (label, _channelNamesOptions[label] ) )
for label, defaultData in sorted( _channelNamesOptions.items() ):
if label not in [ i[0] for i in sortedOptions ]:
sortedOptions.append( (label, defaultData) )
categories = { "Standard" : [], "Custom" : [], "Advanced" : [] }
for label, defaultData in sortedOptions:
if label == "closure":
categories["Advanced"].append( ( label, label, defaultData ) )
else:
bareLabel = label.replace( "RGBA", "" ).replace( "RGB", "" )
channelName = bareLabel
if label.startswith( "custom" ):
if channelName in usedNames:
suffix = 2
while True:
channelName = bareLabel + str( suffix )
if channelName not in usedNames:
break
suffix += 1
categories["Custom"].append( ( label, channelName, defaultData ) )
else:
if channelName in usedNames:
continue
categories["Standard"].append( ( label, channelName, defaultData ) )
for category in [ "Standard", "Custom", "Advanced" ]:
for ( menuLabel, channelName, defaultData ) in categories[category]:
result.append(
"/" + category + "/" + menuLabel,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), channelName, defaultData ),
}
)
return result
def __addPlug( self, name, defaultData ) :
alphaValue = None
if isinstance( defaultData, IECore.Color4fData ):
alphaValue = Gaffer.FloatPlug( "value", Gaffer.Plug.Direction.In, defaultData.value.a )
defaultData = IECore.Color3fData( imath.Color3f( defaultData.value.r, defaultData.value.g, defaultData.value.b ) )
if defaultData is None:
plugName = "closure"
name = ""
valuePlug = GafferOSL.ClosurePlug( "value" )
else:
plugName = "channel"
valuePlug = Gaffer.PlugAlgo.createPlugFromData( "value", Gaffer.Plug.Direction.In, Gaffer.Plug.Flags.Default, defaultData )
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().addChild( Gaffer.NameValuePlug( name, valuePlug, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
if alphaValue:
self.getPlug().addChild(
Gaffer.NameValuePlug( name + ".A" if name else "A", alphaValue, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
)
def __channelLabelFromPlug( plug ):
if plug.typeId() == GafferOSL.ClosurePlug.staticTypeId():
return plug.parent().getName()
elif plug.typeId() == Gaffer.Color3fPlug.staticTypeId() and plug.parent()["name"].getValue() == "":
return "[RGB]"
else:
return plug.parent()["name"].getValue()
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferOSL.OSLImage,
"description",
"""
Executes OSL shaders to perform image processing. Use the shaders from
the OSL/ImageProcessing menu to read values from the input image and
then write values back to it.
""",
"plugAdderOptions", IECore.CompoundData( _channelNamesOptions ),
"layout:activator:defaultFormatActive", lambda node : not node["in"].getInput(),
plugs = {
"defaultFormat" : [
"description",
"""
The resolution and aspect ratio to output when there is no input image provided.
""",
"layout:activator", "defaultFormatActive",
],
"channels" : [
"description",
"""
Define image channels to output by adding child plugs and connecting
corresponding OSL shaders. You can drive RGB layers with a color,
or connect individual channels to a float.
If you want to add multiple channels at once, you can also add a closure plug,
which can accept a connection from an OSLCode with a combined output closure.
""",
"layout:customWidget:footer:widgetType", "GafferOSLUI.OSLImageUI._ChannelsFooter",
"layout:customWidget:footer:index", -1,
"nodule:type", "GafferUI::CompoundNodule",
"noduleLayout:section", "left",
"noduleLayout:spacing", 0.2,
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
# Add + button for showing and hiding parameters in the GraphEditor
"noduleLayout:customGadget:addButton:gadgetType", "GafferOSLUI.OSLImageUI.PlugAdder",
],
"channels.*" : [
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
# for the case where they get promoted to a box
# individually.
"noduleLayout:section", "left",
"nodule:type", "GafferUI::CompoundNodule",
"nameValuePlugPlugValueWidget:ignoreNamePlug", lambda plug : isinstance( plug["value"], GafferOSL.ClosurePlug ),
],
"channels.*.name" : [
"nodule:type", "",
"stringPlugValueWidget:placeholderText", lambda plug : "[RGB]" if isinstance( plug.parent()["value"], Gaffer.Color3fPlug ) else None,
],
"channels.*.enabled" : [
"nodule:type", "",
],
"channels.*.value" : [
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
# for the case where they get promoted to a box
# individually.
"noduleLayout:section", "left",
"noduleLayout:label", __channelLabelFromPlug,
"ui:visibleDimensions", lambda plug : 2 if hasattr( plug, "interpretation" ) and plug.interpretation() == IECore.GeometricData.Interpretation.UV else None,
],
}
)
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import base
from troveclient import common
from troveclient import utils
from troveclient.v1 import instances
class RootHistory(base.Resource):
def __repr__(self):
return ("<Root History: Instance %s enabled at %s by %s>"
% (self.id, self.created, self.user))
class Management(base.ManagerWithFind):
"""
Manage :class:`Instances` resources.
"""
resource_class = instances.Instance
# Appease the abc gods
def list(self):
pass
def show(self, instance):
"""
Get details of one instance.
:rtype: :class:`Instance`.
"""
return self._get("/mgmt/instances/%s" % base.getid(instance),
'instance')
def index(self, deleted=None, limit=None, marker=None):
"""
Show an overview of all local instances.
Optionally, filter by deleted status.
:rtype: list of :class:`Instance`.
"""
form = ''
if deleted is not None:
if str(deleted).lower() in ('true', '1'):
form = "?deleted=true"
else:
form = "?deleted=false"
url = "/mgmt/instances%s" % form
return self._paginated(url, "instances", limit, marker)
def root_enabled_history(self, instance):
"""
Get root access history of one instance.
"""
url = "/mgmt/instances/%s/root" % base.getid(instance)
resp, body = self.api.client.get(url)
if not body:
raise Exception("Call to " + url + " did not return a body.")
return body['root_history']
def _action(self, instance_id, body):
"""
Perform a server "action" -- reboot/rebuild/resize/etc.
"""
url = "/mgmt/instances/%s/action" % instance_id
resp, body = self.api.client.post(url, body=body)
common.check_for_exceptions(resp, body, url)
if body:
return self.resource_class(self, body, loaded=True)
return body
def stop(self, instance_id):
"""
Stop the database on an instance
"""
body = {'stop': {}}
self._action(instance_id, body)
def reboot(self, instance_id):
"""
Reboot the underlying OS.
:param instance_id: The :class:`Instance` (or its ID) to reboot.
"""
body = {'reboot': {}}
self._action(instance_id, body)
def migrate(self, instance_id, host=None):
"""
Migrate the instance.
:param instance_id: The :class:`Instance` (or its ID) to migrate.
"""
if host:
body = {'migrate': {'host': host}}
else:
body = {'migrate': {}}
self._action(instance_id, body)
def update(self, instance_id):
"""
Update the guest agent via apt-get.
"""
body = {'update': {}}
self._action(instance_id, body)
def reset_task_status(self, instance_id):
"""
Set the task status to NONE.
"""
body = {'reset-task-status': {}}
self._action(instance_id, body)
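# Usage sketch (editor's addition): the manager is normally reached through a
# configured troveclient Client as ``cs.management_python_troveclient_ext``
# (see the CLI commands below); constructing it directly from an ``api``
# object, as here, is an illustrative assumption.
def _example_management_usage(api, instance_id):
    mgmt = Management(api)
    mgmt.stop(instance_id)
    mgmt.reboot(instance_id)
    return mgmt.root_enabled_history(instance_id)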
def _print_instance(instance):
if instance._info.get('links'):
del(instance._info['links'])
utils.print_dict(instance._info)
@utils.arg('instance', metavar='<instance>', help='ID of the instance.')
@utils.service_type('database')
def do_mgmt_show(cs, args):
"""Show details of an instance"""
instance = cs.management_python_troveclient_ext.show(args.instance)
instance._info['flavor'] = instance.flavor['id']
if hasattr(instance, 'volume') and instance.volume:
instance._info['volume'] = instance.volume['size']
if 'id' in instance.volume:
instance._info['volume_id'] = instance.volume['id']
if 'used' in instance.volume:
instance._info['volume_used'] = instance.volume['used']
if 'status' in instance.volume:
instance._info['volume_status'] = instance.volume['status']
if hasattr(instance, 'ip'):
instance._info['ip'] = ', '.join(instance.ip)
if hasattr(instance, 'datastore'):
instance._info['datastore'] = instance.datastore['type']
instance._info['datastore_version'] = instance.datastore['version']
if hasattr(instance, 'guest_status'):
description = instance.guest_status['state_description']
instance._info['guest_status'] = description
_print_instance(instance)
@utils.arg('--deleted', metavar='<deleted>', default=None,
help='Optional. Filter instances on deleted.')
@utils.service_type('database')
def do_mgmt_list(cs, args):
"""List all instances"""
instances = cs.management_python_troveclient_ext.index(deleted=args.deleted)
for instance in instances:
setattr(instance, 'flavor_id', instance.flavor['id'])
if hasattr(instance, 'volume'):
setattr(instance, 'size', instance.volume['size'])
if hasattr(instance, 'datastore'):
setattr(instance, 'datastore_version',
instance.datastore['version'])
setattr(instance, 'datastore', instance.datastore['type'])
utils.print_list(instances,
['id', 'name', 'tenant_id', 'flavor_id', 'size',
'datastore', 'datastore_version', 'status', 'created',
'deleted_at'])
@utils.arg('instance', metavar='<instance>', help='ID of the instance.')
@utils.service_type('database')
def do_mgmt_root_history(cs, args):
"""Get the root enabled history of an instance"""
ext = cs.management_python_troveclient_ext
history = ext.root_enabled_history(args.instance)
utils.print_dict(history)
@utils.arg('instance', metavar='<instance>', help='ID of the instance.')
def do_mgmt_stop(cs, args):
"""Stop the database on an instance"""
cs.management_python_troveclient_ext.stop(args.instance)
@utils.arg('instance', metavar='<instance>', help='ID of the instance.')
def do_mgmt_reboot(cs, args):
"""Soft reboot an instance"""
cs.management_python_troveclient_ext.reboot(args.instance)
@utils.arg('instance', metavar='<instance>', help='ID of the instance.')
@utils.arg('--host', metavar='<host>', default=None,
help='Optional. Name of the host.')
def do_mgmt_migrate(cs, args):
"""Migrate an instance"""
ext = cs.management_python_troveclient_ext
ext.migrate(args.instance, host=args.host)
@utils.arg('instance', metavar='<instance>', help='ID of the instance.')
def do_mgmt_update(cs, args):
"""Update an instance"""
cs.management_python_troveclient_ext.update(args.instance)
@utils.arg('instance', metavar='<instance>', help='ID of the instance.')
def do_mgmt_reset_task_status(cs, args):
"""Update the task status to None for an instance"""
cs.management_python_troveclient_ext.reset_task_status(args.instance)
|
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Bram Cohen, Uoti Urpala
import os
import sys
import socket
import signal
import struct
import thread
from bisect import insort
from cStringIO import StringIO
from traceback import print_exc
from errno import EWOULDBLOCK, ENOBUFS, EINTR
from BitTorrent.platform import bttime
from BitTorrent import WARNING, CRITICAL, FAQ_URL
from BitTorrent.defer import Deferred
try:
from select import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
timemult = 1000
except ImportError:
from BitTorrent.selectpoll import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
timemult = 1
NOLINGER = struct.pack('ii', 1, 0)
class Handler(object):
# there is only a semantic difference between "made" and "started".
# I prefer "started"
def connection_started(self, s):
self.connection_made(s)
def connection_made(self, s):
pass
def connection_lost(self, s):
pass
# Maybe connection_lost should just have a default 'None' exception parameter
def connection_failed(self, addr, exception):
pass
def connection_flushed(self, s):
pass
def data_came_in(self, addr, datagram):
pass
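# Example (editor's sketch, illustrative only): a minimal Handler subclass
# showing the callback protocol above. For TCP connections, data_came_in
# receives the SingleSocket itself as its first argument.
class _ExampleEchoHandler(Handler):
    def connection_started(self, s):
        s.write('hello\n')
    def data_came_in(self, s, data):
        s.write(data)
    def connection_lost(self, s):
        pass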
class SingleSocket(object):
def __init__(self, rawserver, sock, handler, context, addr=None):
self.rawserver = rawserver
self.socket = sock
self.handler = handler
self.buffer = []
self.last_hit = bttime()
self.fileno = sock.fileno()
self.connected = False
self.context = context
self.ip = None
self.port = None
if isinstance(addr, basestring):
# UNIX socket, not really ip
self.ip = addr
else:
peername = (None, None)
try:
peername = self.socket.getpeername()
except socket.error, e:
# UDP raises (107, 'Transport endpoint is not connected')
# but so can a TCP socket we just got from start_connection,
# in which case addr is set and we use it later.
if (e[0] == 107) and (addr is None):
# lies.
# the peer endpoint should be gathered from the
# tuple passed to data_came_in
try:
peername = self.socket.getsockname()
except socket.error, e:
pass
# this is awesome!
# max prefers a value over None, so this is a common case:
# max(('ip', None), ('ip', 1234)) => ('ip', 1234)
# or the default case:
# max(('ip', None), None) => ('ip', None)
self.ip, self.port = max(peername, addr)
def close(self):
sock = self.socket
self.socket = None
self.buffer = []
del self.rawserver.single_sockets[self.fileno]
self.rawserver.poll.unregister(sock)
self.handler = None
if self.rawserver.config['close_with_rst']:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
sock.close()
def shutdown(self, val):
self.socket.shutdown(val)
def is_flushed(self):
return len(self.buffer) == 0
def write(self, s):
assert self.socket is not None
self.buffer.append(s)
if len(self.buffer) == 1:
self.try_write()
def try_write(self):
if self.connected:
try:
while self.buffer != []:
amount = self.socket.send(self.buffer[0])
if amount != len(self.buffer[0]):
if amount != 0:
self.buffer[0] = self.buffer[0][amount:]
break
del self.buffer[0]
except socket.error, e:
code, msg = e
if code != EWOULDBLOCK:
self.rawserver.dead_from_write.append(self)
return
if self.buffer == []:
self.rawserver.poll.register(self.socket, POLLIN)
else:
self.rawserver.poll.register(self.socket, POLLIN | POLLOUT)
def default_error_handler(level, message):
print message
class RawServer(object):
def __init__(self, doneflag, config, noisy=True,
errorfunc=default_error_handler, tos=0):
self.config = config
self.tos = tos
self.poll = poll()
# {socket: SingleSocket}
self.single_sockets = {}
self.udp_sockets = {}
self.dead_from_write = []
self.doneflag = doneflag
self.noisy = noisy
self.errorfunc = errorfunc
self.funcs = []
self.externally_added_tasks = []
self.listening_handlers = {}
self.serversockets = {}
self.live_contexts = {None : True}
self.ident = thread.get_ident()
self.to_start = []
self.add_task(self.scan_for_timeouts, config['timeout_check_interval'])
if sys.platform.startswith('win'):
# Windows doesn't support pipes with select(). Just prevent sleeps
# longer than a second instead of proper wakeup for now.
self.wakeupfds = (None, None)
self._wakeup()
else:
self.wakeupfds = os.pipe()
self.poll.register(self.wakeupfds[0], POLLIN)
def _wakeup(self):
self.add_task(self._wakeup, 1)
def add_context(self, context):
self.live_contexts[context] = True
def remove_context(self, context):
del self.live_contexts[context]
self.funcs = [x for x in self.funcs if x[3] != context]
def add_task(self, func, delay, args=(), context=None):
assert thread.get_ident() == self.ident
assert type(args) == list or type(args) == tuple
if context in self.live_contexts:
insort(self.funcs, (bttime() + delay, func, args, context))
def external_add_task(self, func, delay, args=(), context=None):
assert type(args) == list or type(args) == tuple
self.externally_added_tasks.append((func, delay, args, context))
# Wake up the RawServer thread in case it's sleeping in poll()
if self.wakeupfds[1] is not None:
os.write(self.wakeupfds[1], 'X')
def scan_for_timeouts(self):
self.add_task(self.scan_for_timeouts,
self.config['timeout_check_interval'])
t = bttime() - self.config['socket_timeout']
tokill = []
for s in [s for s in self.single_sockets.values() if s not in self.udp_sockets.keys()]:
if s.last_hit < t:
tokill.append(s)
for k in tokill:
if k.socket is not None:
self._close_socket(k)
def create_unixserversocket(filename):
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.setblocking(0)
server.bind(filename)
server.listen(5)
return server
create_unixserversocket = staticmethod(create_unixserversocket)
def create_serversocket(port, bind='', reuse=False, tos=0):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if reuse and os.name != 'nt':
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(0)
if tos != 0:
try:
server.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
except:
pass
server.bind((bind, port))
server.listen(5)
return server
create_serversocket = staticmethod(create_serversocket)
def create_udpsocket(port, bind='', reuse=False, tos=0):
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if reuse and os.name != 'nt':
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(0)
if tos != 0:
try:
server.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
except:
pass
server.bind((bind, port))
return server
create_udpsocket = staticmethod(create_udpsocket)
def start_listening(self, serversocket, handler, context=None):
self.listening_handlers[serversocket.fileno()] = (handler, context)
self.serversockets[serversocket.fileno()] = serversocket
self.poll.register(serversocket, POLLIN)
def start_listening_udp(self, serversocket, handler, context=None):
self.listening_handlers[serversocket.fileno()] = (handler, context)
nss = SingleSocket(self, serversocket, handler, context)
self.single_sockets[serversocket.fileno()] = nss
self.udp_sockets[nss] = 1
self.poll.register(serversocket, POLLIN)
def stop_listening(self, serversocket):
del self.listening_handlers[serversocket.fileno()]
del self.serversockets[serversocket.fileno()]
self.poll.unregister(serversocket)
def stop_listening_udp(self, serversocket):
del self.listening_handlers[serversocket.fileno()]
del self.single_sockets[serversocket.fileno()]
l = [s for s in self.udp_sockets.keys() if s.socket == serversocket]
del self.udp_sockets[l[0]]
self.poll.unregister(serversocket)
def start_connection(self, dns, handler=None, context=None, do_bind=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
bindaddr = do_bind and self.config['bind']
if bindaddr:
sock.bind((bindaddr, 0))
if self.tos != 0:
try:
sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self.tos)
except:
pass
try:
sock.connect_ex(dns)
except socket.error:
sock.close()
raise
except Exception, e:
sock.close()
raise socket.error(str(e))
self.poll.register(sock, POLLIN)
s = SingleSocket(self, sock, handler, context, dns)
self.single_sockets[sock.fileno()] = s
return s
def _add_pending_connection(self, addr):
pass
def _remove_pending_connection(self, addr):
pass
def async_start_connection(self, dns, handler=None, context=None, do_bind=True):
self.to_start.insert(0, (dns, handler, context, do_bind))
self._start_connection()
return True
def _start_connection(self):
dns, handler, context, do_bind = self.to_start.pop()
try:
s = self.start_connection(dns, handler, context, do_bind)
except Exception, e:
handler.connection_failed(dns, e)
else:
handler.connection_started(s)
def wrap_socket(self, sock, handler, context=None, ip=None, port=None):
sock.setblocking(0)
self.poll.register(sock, POLLIN)
s = SingleSocket(self, sock, handler, context, (ip, port))
self.single_sockets[sock.fileno()] = s
return s
# must be called from the main thread
def install_sigint_handler(self):
signal.signal(signal.SIGINT, self._handler)
def _handler(self, signum, frame):
self.external_add_task(self.doneflag.set, 0)
# Allow pressing ctrl-c multiple times to raise KeyboardInterrupt,
# in case the program is in an infinite loop
signal.signal(signal.SIGINT, signal.default_int_handler)
def _handle_events(self, events):
for sock, event in events:
if sock in self.serversockets:
s = self.serversockets[sock]
if event & (POLLHUP | POLLERR) != 0:
try:
self.poll.unregister(s)
s.close()
except socket.error, e:
self.errorfunc(WARNING, _("failed to unregister or close server socket: %s") % str(e))
self.errorfunc(CRITICAL, _("lost server socket"))
else:
handler, context = self.listening_handlers[sock]
try:
newsock, addr = s.accept()
except socket.error, e:
continue
try:
newsock.setblocking(0)
nss = SingleSocket(self, newsock, handler, context, addr)
self.single_sockets[newsock.fileno()] = nss
self.poll.register(newsock, POLLIN)
self._make_wrapped_call(handler.connection_made,
                        (nss,), context=context)
except socket.error, e:
self.errorfunc(WARNING,
_("Error handling accepted connection: ") +
str(e))
else:
s = self.single_sockets.get(sock)
if s is None:
if sock == self.wakeupfds[0]:
# Another thread wrote this just to wake us up.
os.read(sock, 1)
continue
s.connected = True
if event & POLLERR:
self._close_socket(s)
continue
if event & (POLLIN | POLLHUP):
s.last_hit = bttime()
try:
data, addr = s.socket.recvfrom(100000)
except socket.error, e:
code, msg = e
if code != EWOULDBLOCK:
self._close_socket(s)
continue
if data == '' and not self.udp_sockets.has_key(s):
self._close_socket(s)
else:
if not self.udp_sockets.has_key(s):
self._make_wrapped_call(s.handler.data_came_in,
(s, data), s)
else:
self._make_wrapped_call(s.handler.data_came_in,
(addr, data), s)
# data_came_in could have closed the socket (s.socket = None)
if event & POLLOUT and s.socket is not None:
s.try_write()
if s.is_flushed():
self._make_wrapped_call(s.handler.connection_flushed,
(s,), s)
def _pop_externally_added(self):
while self.externally_added_tasks:
task = self.externally_added_tasks.pop(0)
self.add_task(*task)
def listen_forever(self):
ret = 0
self.ident = thread.get_ident()
while not self.doneflag.isSet() and not ret:
ret = self.listen_once()
def listen_once(self, period=1e9):
try:
self._pop_externally_added()
if self.funcs:
period = self.funcs[0][0] - bttime()
if period < 0:
period = 0
events = self.poll.poll(period * timemult)
if self.doneflag.isSet():
return 0
while self.funcs and self.funcs[0][0] <= bttime():
garbage, func, args, context = self.funcs.pop(0)
self._make_wrapped_call(func, args, context=context)
self._close_dead()
self._handle_events(events)
if self.doneflag.isSet():
return 0
self._close_dead()
except error, e:
if self.doneflag.isSet():
return 0
# I can't find a coherent explanation for what the behavior
# should be here, and people report conflicting behavior,
# so I'll just try all the possibilities
code = None
if hasattr(e, '__getitem__'):
code = e[0]
else:
code = e
if code == ENOBUFS:
# log the traceback so we can see where the exception is coming from
print_exc(file = sys.stderr)
self.errorfunc(CRITICAL,
_("Have to exit due to the TCP stack flaking "
"out. Please see the FAQ at %s") % FAQ_URL)
return -1
elif code in (EINTR,):
# add other ignorable error codes here
pass
else:
self.errorfunc(CRITICAL, str(e))
return 0
except KeyboardInterrupt:
print_exc()
return -1
except:
data = StringIO()
print_exc(file=data)
self.errorfunc(CRITICAL, data.getvalue())
return 0
def _make_wrapped_call(self, function, args, socket=None, context=None):
try:
function(*args)
except KeyboardInterrupt:
raise
except Exception, e: # hopefully nothing raises strings
# Incoming sockets can be assigned to a particular torrent during
# a data_came_in call, and it's possible (though not likely) that
# there could be a torrent-specific exception during the same call.
# Therefore read the context after the call.
if socket is not None:
context = socket.context
if self.noisy and context is None:
data = StringIO()
print_exc(file=data)
self.errorfunc(CRITICAL, data.getvalue())
if context is not None:
context.got_exception(e)
def _close_dead(self):
while len(self.dead_from_write) > 0:
old = self.dead_from_write
self.dead_from_write = []
for s in old:
if s.socket is not None:
self._close_socket(s)
def _close_socket(self, s):
sock = s.socket.fileno()
if self.config['close_with_rst']:
s.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
s.socket.close()
self.poll.unregister(sock)
del self.single_sockets[sock]
s.socket = None
self._make_wrapped_call(s.handler.connection_lost, (s,), s)
s.handler = None
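# Usage sketch (editor's addition, illustrative only): minimal wiring for a
# single-threaded server loop. The config keys listed are the ones this
# module reads; the port and handler are assumptions.
def _example_run_server(port, handler):
    import threading
    doneflag = threading.Event()
    config = {'bind': '', 'close_with_rst': False,
              'socket_timeout': 300, 'timeout_check_interval': 60}
    rawserver = RawServer(doneflag, config)
    serversocket = RawServer.create_serversocket(port)
    rawserver.start_listening(serversocket, handler)
    rawserver.listen_forever()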
|
|
# Copyright 2015-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import logging
from itertools import chain
from devlib.collector.ftrace import TRACE_MARKER_START, TRACE_MARKER_STOP
from wa.utils.misc import isiterable
from wa.utils.types import numeric
logger = logging.getLogger('trace-cmd')
class TraceCmdEvent(object):
"""
A single trace-cmd event. This will appear in the trace cmd report in the format ::
<idle>-0 [000] 3284.126993: sched_rq_runnable_load: cpu=0 load=54
| | | | |___________|
| | | | |
thread cpu timestamp name body
"""
__slots__ = ['thread', 'reporting_cpu_id', 'timestamp', 'name', 'text', 'fields']
def __init__(self, thread, cpu_id, ts, name, body, parser=None):
"""
parameters:
:thread: thread which generated the event
:cpu_id: cpu on which the event has occurred
:ts: timestamp of the event
:name: the name of the event
:body: a string with the rest of the event text
:parser: optionally, a function that will parse body text to populate
this event's attributes
The parser can be any callable that can be invoked with
parser(event, text)
Where ``event`` is this TraceCmdEvent instance, and ``text`` is the body text to be
parsed. The parser should update the passed event instance and not return anything
(the return value will be ignored). Any exceptions raised by the parser will be silently
ignored (note that this means that the event's attributes may be partially initialized).
"""
self.thread = thread
self.reporting_cpu_id = int(cpu_id)
self.timestamp = numeric(ts)
self.name = name
self.text = body
self.fields = {}
if parser:
try:
parser(self, self.text)
except Exception: # pylint: disable=broad-except
# unknown format; assume the user does not care or does not
# know how to parse self.text
pass
def __getattr__(self, name):
try:
return self.fields[name]
except KeyError:
raise AttributeError(name)
def __str__(self):
return 'TE({} @ {})'.format(self.name, self.timestamp)
__repr__ = __str__
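# Example (editor's sketch): fields parsed out of the body are exposed as
# attributes through __getattr__. The sample values mirror the docstring
# above; default_body_parser is defined later in this module.
def _example_trace_cmd_event():
    event = TraceCmdEvent('<idle>-0', '0', '3284.126993',
                          'sched_rq_runnable_load', 'cpu=0 load=54',
                          parser=default_body_parser)
    assert event.reporting_cpu_id == 0
    assert event.cpu == 0 and event.load == 54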
class DroppedEventsEvent(object):
__slots__ = ['thread', 'reporting_cpu_id', 'timestamp', 'name', 'text', 'fields']
def __init__(self, cpu_id):
self.thread = None
self.reporting_cpu_id = None
self.timestamp = None
self.name = 'DROPPED EVENTS DETECTED'
self.text = None
self.fields = {'cpu_id': int(cpu_id)}
def __getattr__(self, name):
try:
return self.fields[name]
except KeyError:
raise AttributeError(name)
def __str__(self):
return 'DROPPED_EVENTS_ON_CPU{}'.format(self.cpu_id)
__repr__ = __str__
def try_convert_to_numeric(v):
try:
if isiterable(v):
return list(map(numeric, v))
else:
return numeric(v)
except ValueError:
return v
def default_body_parser(event, text):
"""
Default parser to attempt to use to parse body text for the event (i.e. after
the "header" common to all events has been parsed). This assumes that the body is
a whitespace-separated list of key=value pairs. The parser will attempt to convert
the value into a numeric type, and failing that, keep it as a string.
"""
parts = [e.rsplit(' ', 1) for e in text.strip().split('=')]
parts = [p.strip() for p in chain.from_iterable(parts)]
if not len(parts) % 2:
i = iter(parts)
for k, v in zip(i, i):
try:
v = numeric(v)
except ValueError:
pass
event.fields[k] = v
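# Example (editor's sketch): how the default parser fills an event's
# ``fields`` from a key=value body. _ExampleEvent is a stand-in defined
# purely for illustration.
def _example_default_body_parser():
    class _ExampleEvent(object):
        def __init__(self):
            self.fields = {}
    event = _ExampleEvent()
    default_body_parser(event, 'cpu=0 load=54 comm=swapper')
    assert event.fields == {'cpu': 0, 'load': 54, 'comm': 'swapper'}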
def regex_body_parser(regex, flags=0):
"""
Creates an event body parser from the specified regular expression (could be an
``re.RegexObject``, or a string). The regular expression should contain some named
groups, as those will be extracted as the event attributes (unnamed groups and the
rest of the match will be ignored).
If the specified regex is a string, it will be compiled, in which case ``flags`` may
be provided for the resulting regex object (see ``re`` standard module documentation).
If regex is a pre-compiled object, flags will be ignored.
"""
if isinstance(regex, str):
regex = re.compile(regex, flags)
def regex_parser_func(event, text):
match = regex.search(text)
if match:
for k, v in match.groupdict().items():
try:
event.fields[k] = numeric(v)
except ValueError:
event.fields[k] = v
return regex_parser_func
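# Example (editor's sketch): building a body parser from a named-group
# regex; only the named groups end up in ``fields``. The sample body is
# illustrative.
def _example_regex_body_parser():
    parse_func = regex_body_parser(r'comm=(?P<comm>\S+) pid=(?P<pid>\d+)')
    class _ExampleEvent(object):
        def __init__(self):
            self.fields = {}
    event = _ExampleEvent()
    parse_func(event, 'comm=swapper pid=0 prio=120')
    assert event.fields == {'comm': 'swapper', 'pid': 0}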
def sched_switch_parser(event, text):
"""
Sched switch output may be presented in a couple of different formats. One is handled
by a regex. The other format can *almost* be handled by the default parser, if it
weren't for the ``==>`` that appears in the middle.
"""
if text.count('=') == 2: # old format
regex = re.compile(
r'(?P<prev_comm>\S.*):(?P<prev_pid>\d+) \[(?P<prev_prio>\d+)\] (?P<status>\S+)'
r' ==> '
r'(?P<next_comm>\S.*):(?P<next_pid>\d+) \[(?P<next_prio>\d+)\]'
)
parser_func = regex_body_parser(regex)
return parser_func(event, text)
else: # there are more than two "=" -- new format
return default_body_parser(event, text.replace('==>', ''))
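# Example (editor's sketch): both body formats handled above yield the same
# fields. The sample bodies are illustrative.
def _example_sched_switch_formats():
    class _ExampleEvent(object):
        def __init__(self):
            self.fields = {}
    old, new = _ExampleEvent(), _ExampleEvent()
    sched_switch_parser(old, 'swapper/0:0 [120] R ==> trace-cmd:3284 [120]')
    sched_switch_parser(new, 'prev_comm=swapper/0 prev_pid=0 prev_prio=120 '
                             'prev_state=R ==> next_comm=trace-cmd '
                             'next_pid=3284 next_prio=120')
    assert old.fields['next_pid'] == new.fields['next_pid'] == 3284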
def sched_stat_parser(event, text):
"""
sched_stat_* events include the units, "[ns]", in an otherwise
regular key=value sequence; so the units need to be stripped out first.
"""
return default_body_parser(event, text.replace(' [ns]', ''))
def sched_wakeup_parser(event, text):
regex = re.compile(r'(?P<comm>\S+):(?P<pid>\d+) \[(?P<prio>\d+)\] success=(?P<success>\d) CPU:(?P<cpu>\d+)')
parse_func = regex_body_parser(regex)
return parse_func(event, text)
# Maps event onto the corresponding parser for its body text. A parser may be
# a callable with signature
#
# parser(event, bodytext)
#
# a re.RegexObject, or a string (in which case it will be compiled into a
# regex). In case of a string/regex, its named groups will be used to populate
# the event's attributes.
EVENT_PARSER_MAP = {
'sched_stat_blocked': sched_stat_parser,
'sched_stat_iowait': sched_stat_parser,
'sched_stat_runtime': sched_stat_parser,
'sched_stat_sleep': sched_stat_parser,
'sched_stat_wait': sched_stat_parser,
'sched_switch': sched_switch_parser,
'sched_wakeup': sched_wakeup_parser,
'sched_wakeup_new': sched_wakeup_parser,
}
TRACE_EVENT_REGEX = re.compile(r'^\s+(?P<thread>\S+.*?\S+)\s+\[(?P<cpu_id>\d+)\]\s+(?P<ts>[\d.]+):\s+'
r'(?P<name>[^:]+):\s+(?P<body>.*?)\s*$')
HEADER_REGEX = re.compile(r'^\s*(?:version|cpus)\s*=\s*([\d.]+)\s*$')
DROPPED_EVENTS_REGEX = re.compile(r'CPU:(?P<cpu_id>\d+) \[\d*\s*EVENTS DROPPED\]')
EMPTY_CPU_REGEX = re.compile(r'CPU \d+ is empty')
class TraceCmdParser(object):
"""
A parser for textual representation of ftrace as reported by trace-cmd
"""
def __init__(self, filter_markers=True, check_for_markers=True, events=None):
"""
Initialize a new trace parser.
:param filter_markers: Specifies whether the trace before the start
marker and after the stop marker should be
filtered out (so only events between the two
markers will be reported). This maybe overriden
based on `check_for_markers` parameter of
`parse()`
:param check_for_markers: Check if the start/stop markers are present
in the trace and ensure that `filter_markers`
is `False` if they aren't
:param events: A list of event names to be reported; if not specified,
all events will be reported.
"""
self.filter_markers = filter_markers
self.check_for_markers = check_for_markers
self.events = events
def parse(self, filepath): # pylint: disable=too-many-branches,too-many-locals
"""
This is a generator for the trace event stream.
:param filepath: The path to the file containing text trace as reported
by trace-cmd
"""
inside_marked_region = False
# pylint: disable=superfluous-parens
filters = [re.compile('^{}$'.format(e)) for e in (self.events or [])]
filter_markers = self.filter_markers
if filter_markers and self.check_for_markers:
with open(filepath) as fh:
for line in fh:
if TRACE_MARKER_START in line:
break
else:
# marker not found; force filtering by marker to False
filter_markers = False
with open(filepath) as fh:
for line in fh:
# if processing trace markers, skip marker lines as well as all
# lines outside marked region
if filter_markers:
if not inside_marked_region:
if TRACE_MARKER_START in line:
inside_marked_region = True
continue
elif TRACE_MARKER_STOP in line:
inside_marked_region = False
continue
match = DROPPED_EVENTS_REGEX.search(line)
if match:
yield DroppedEventsEvent(match.group('cpu_id'))
continue
matched = False
for rx in [HEADER_REGEX, EMPTY_CPU_REGEX]:
match = rx.search(line)
if match:
logger.debug(line.strip())
matched = True
break
if matched:
continue
match = TRACE_EVENT_REGEX.search(line)
if not match:
logger.warning('Invalid trace event: "{}"'.format(line))
continue
event_name = match.group('name')
if filters:
found = False
for f in filters:
if f.search(event_name):
found = True
break
if not found:
continue
body_parser = EVENT_PARSER_MAP.get(event_name, default_body_parser)
if isinstance(body_parser, (str, re.Pattern)): # pylint: disable=protected-access
body_parser = regex_body_parser(body_parser)
yield TraceCmdEvent(parser=body_parser, **match.groupdict())
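# Usage sketch (editor's addition): stream selected events from a textual
# trace-cmd report. The file path and event names are illustrative.
def _example_parse_trace(filepath):
    parser = TraceCmdParser(filter_markers=False,
                            events=['sched_switch', 'sched_wakeup'])
    for event in parser.parse(filepath):
        logger.info('%s @ %s: %s', event.name, event.timestamp, event.fields)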
def trace_has_marker(filepath, max_lines_to_check=2000000):
with open(filepath) as fh:
for i, line in enumerate(fh):
if TRACE_MARKER_START in line:
return True
if i >= max_lines_to_check:
break
return False
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
@tf_export("OpError", "errors.OpError")
class OpError(Exception):
"""A generic error that is raised when TensorFlow execution fails.
Whenever possible, the session will raise a more specific subclass
of `OpError` from the `tf.errors` module.
"""
def __init__(self, node_def, op, message, error_code):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
"""
super(OpError, self).__init__()
self._node_def = node_def
self._op = op
self._message = message
self._error_code = error_code
def __reduce__(self):
# Allow the subclasses to accept fewer arguments in their __init__.
init_argspec = tf_inspect.getargspec(self.__class__.__init__)
args = tuple(getattr(self, arg) for arg in init_argspec.args[1:])
return self.__class__, args
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
`tf.Operation`
object. In that case, this will return `None`, and you should
instead use the `tf.OpError.node_def` to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
def __str__(self):
if self._op is not None:
output = ["%s\n\nCaused by op %r, defined at:\n" % (self.message,
self._op.name,)]
curr_traceback_list = traceback.format_list(self._op.traceback)
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n"
% (original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(original_op.traceback)
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend(
["[elided %d identical lines from previous traceback]\n"
% (elide_count - 1,), last_elided_line])
is_eliding = False
output.append(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
output.append("\n%s (see above for traceback): %s\n" %
(type(self).__name__, self.message))
return "".join(output)
else:
return self.message
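# Usage sketch (editor's addition): execution failures surface as OpError
# subclasses, so callers can catch a specific subclass or the base class.
# The callable being run is an assumption for illustration.
def _example_catch_op_error(run_call):
    try:
        return run_call()
    except OpError as e:
        warnings.warn("op failed (code %s): %s" % (e.error_code, e.message))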
OK = error_codes_pb2.OK
tf_export("errors.OK").export_constant(__name__, "OK")
CANCELLED = error_codes_pb2.CANCELLED
tf_export("errors.CANCELLED").export_constant(__name__, "CANCELLED")
UNKNOWN = error_codes_pb2.UNKNOWN
tf_export("errors.UNKNOWN").export_constant(__name__, "UNKNOWN")
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
tf_export("errors.INVALID_ARGUMENT").export_constant(__name__,
"INVALID_ARGUMENT")
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
tf_export("errors.DEADLINE_EXCEEDED").export_constant(__name__,
"DEADLINE_EXCEEDED")
NOT_FOUND = error_codes_pb2.NOT_FOUND
tf_export("errors.NOT_FOUND").export_constant(__name__, "NOT_FOUND")
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
tf_export("errors.ALREADY_EXISTS").export_constant(__name__, "ALREADY_EXISTS")
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
tf_export("errors.PERMISSION_DENIED").export_constant(__name__,
"PERMISSION_DENIED")
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
tf_export("errors.UNAUTHENTICATED").export_constant(__name__, "UNAUTHENTICATED")
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
tf_export("errors.RESOURCE_EXHAUSTED").export_constant(__name__,
"RESOURCE_EXHAUSTED")
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
tf_export("errors.FAILED_PRECONDITION").export_constant(__name__,
"FAILED_PRECONDITION")
ABORTED = error_codes_pb2.ABORTED
tf_export("errors.ABORTED").export_constant(__name__, "ABORTED")
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
tf_export("errors.OUT_OF_RANGE").export_constant(__name__, "OUT_OF_RANGE")
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
tf_export("errors.UNIMPLEMENTED").export_constant(__name__, "UNIMPLEMENTED")
INTERNAL = error_codes_pb2.INTERNAL
tf_export("errors.INTERNAL").export_constant(__name__, "INTERNAL")
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
tf_export("errors.UNAVAILABLE").export_constant(__name__, "UNAVAILABLE")
DATA_LOSS = error_codes_pb2.DATA_LOSS
tf_export("errors.DATA_LOSS").export_constant(__name__, "DATA_LOSS")
# pylint: disable=line-too-long
@tf_export("errors.CancelledError")
class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
For example, a long-running operation (e.g.
`tf.QueueBase.enqueue`) may be
cancelled by running another operation (e.g.
`tf.QueueBase.close`),
or by `tf.Session.close`.
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `CancelledError`."""
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
@tf_export("errors.UnknownError")
class UnknownError(OpError):
"""Unknown error.
An example of where this error may be returned is if a Status value
received from another address space belongs to an error-space that
is not known to this address space. Also errors raised by APIs that
do not return enough error information may be converted to this
error.
@@__init__
"""
def __init__(self, node_def, op, message, error_code=UNKNOWN):
"""Creates an `UnknownError`."""
super(UnknownError, self).__init__(node_def, op, message, error_code)
@tf_export("errors.InvalidArgumentError")
class InvalidArgumentError(OpError):
"""Raised when an operation receives an invalid argument.
This may occur, for example, if an operation receives an input
tensor that has an invalid value or shape. For example, the
`tf.matmul` op will raise this
error if it receives an input that is not a matrix, and the
`tf.reshape` op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InvalidArgumentError`."""
super(InvalidArgumentError, self).__init__(node_def, op, message,
INVALID_ARGUMENT)
@tf_export("errors.DeadlineExceededError")
class DeadlineExceededError(OpError):
"""Raised when a deadline expires before an operation could complete.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DeadlineExceededError`."""
super(DeadlineExceededError, self).__init__(node_def, op, message,
DEADLINE_EXCEEDED)
@tf_export("errors.NotFoundError")
class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
`tf.WholeFileReader.read`
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `NotFoundError`."""
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
@tf_export("errors.AlreadyExistsError")
class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. `tf.train.Saver.save`)
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS)
@tf_export("errors.PermissionDeniedError")
class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
`tf.WholeFileReader.read`
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `PermissionDeniedError`."""
super(PermissionDeniedError, self).__init__(node_def, op, message,
PERMISSION_DENIED)
@tf_export("errors.UnauthenticatedError")
class UnauthenticatedError(OpError):
"""The request does not have valid authentication credentials.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED)
@tf_export("errors.ResourceExhaustedError")
class ResourceExhaustedError(OpError):
"""Some resource has been exhausted.
For example, this error might be raised if a per-user quota is
exhausted, or perhaps the entire file system is out of space.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `ResourceExhaustedError`."""
super(ResourceExhaustedError, self).__init__(node_def, op, message,
RESOURCE_EXHAUSTED)
@tf_export("errors.FailedPreconditionError")
class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a `tf.Variable`
before it has been initialized.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION)
@tf_export("errors.AbortedError")
class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
`tf.QueueBase.enqueue`
operation may raise `AbortedError` if a
`tf.QueueBase.close` operation
previously ran.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED)
@tf_export("errors.OutOfRangeError")
class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
`tf.QueueBase.dequeue`
operation is blocked on an empty queue, and a
`tf.QueueBase.close`
operation executes.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `OutOfRangeError`."""
super(OutOfRangeError, self).__init__(node_def, op, message,
OUT_OF_RANGE)
@tf_export("errors.UnimplementedError")
class UnimplementedError(OpError):
"""Raised when an operation has not been implemented.
Some operations may raise this error when passed otherwise-valid
arguments that they do not currently support. For example, running
the `tf.nn.max_pool` operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnimplementedError`."""
super(UnimplementedError, self).__init__(node_def, op, message,
UNIMPLEMENTED)
@tf_export("errors.InternalError")
class InternalError(OpError):
"""Raised when the system experiences an internal error.
This exception is raised when some invariant expected by the runtime
has been broken. Catching this exception is not recommended.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InternalError`."""
super(InternalError, self).__init__(node_def, op, message, INTERNAL)
@tf_export("errors.UnavailableError")
class UnavailableError(OpError):
"""Raised when the runtime is currently unavailable.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnavailableError`."""
super(UnavailableError, self).__init__(node_def, op, message,
UNAVAILABLE)
@tf_export("errors.DataLossError")
class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
`tf.WholeFileReader.read`
operation, if the file is truncated while it is being read.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
_CODE_TO_EXCEPTION_CLASS = {
CANCELLED: CancelledError,
UNKNOWN: UnknownError,
INVALID_ARGUMENT: InvalidArgumentError,
DEADLINE_EXCEEDED: DeadlineExceededError,
NOT_FOUND: NotFoundError,
ALREADY_EXISTS: AlreadyExistsError,
PERMISSION_DENIED: PermissionDeniedError,
UNAUTHENTICATED: UnauthenticatedError,
RESOURCE_EXHAUSTED: ResourceExhaustedError,
FAILED_PRECONDITION: FailedPreconditionError,
ABORTED: AbortedError,
OUT_OF_RANGE: OutOfRangeError,
UNIMPLEMENTED: UnimplementedError,
INTERNAL: InternalError,
UNAVAILABLE: UnavailableError,
DATA_LOSS: DataLossError,
}
c_api.PyExceptionRegistry_Init(_CODE_TO_EXCEPTION_CLASS)
_EXCEPTION_CLASS_TO_CODE = {
class_: code for code, class_ in _CODE_TO_EXCEPTION_CLASS.items()}
@tf_export("errors.exception_type_from_error_code")
def exception_type_from_error_code(error_code):
return _CODE_TO_EXCEPTION_CLASS[error_code]
@tf_export("errors.error_code_from_exception_type")
def error_code_from_exception_type(cls):
return _EXCEPTION_CLASS_TO_CODE[cls]
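# Doctest-style sketch of the two lookup helpers above; the round trip
# between integer codes and exception classes is exact because the dicts
# are inverses of each other:
# >>> exception_type_from_error_code(NOT_FOUND) is NotFoundError
# True
# >>> error_code_from_exception_type(NotFoundError) == NOT_FOUND
# True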
def _make_specific_exception(node_def, op, message, error_code):
try:
exc_type = exception_type_from_error_code(error_code)
return exc_type(node_def, op, message)
except KeyError:
warnings.warn("Unknown error code: %d" % error_code)
return UnknownError(node_def, op, message, error_code)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
# TODO(b/77295559): expand use of TF_Status* SWIG typemap and deprecate this.
@tf_export("errors.raise_exception_on_not_ok_status") # pylint: disable=invalid-name
class raise_exception_on_not_ok_status(object):
"""Context manager to check for C API status."""
def __enter__(self):
self.status = c_api_util.ScopedTFStatus()
return self.status.status
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
if c_api.TF_GetCode(self.status.status) != 0:
raise _make_specific_exception(
None, None,
compat.as_text(c_api.TF_Message(self.status.status)),
c_api.TF_GetCode(self.status.status))
# Delete the underlying status object from memory, otherwise it stays
# alive because the traceback of the raise above keeps a reference to it.
finally:
del self.status
return False # False values do not suppress exceptions
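# Minimal usage sketch; `TF_SomeCall` is a hypothetical stand-in for any
# C API function that reports its result through a TF_Status* argument:
#
# with raise_exception_on_not_ok_status() as status:
#     c_api.TF_SomeCall(args, status)  # hypothetical call
#
# On exit, a non-OK code left in `status` is converted into the matching
# OpError subclass and raised.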
|
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
# XXX : copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from __future__ import division
import collections
from operator import itemgetter
import inspect
import warnings
import numpy as np
import scipy
from scipy import linalg, sparse
from math import ceil, log
from numpy.fft import irfft
from distutils.version import LooseVersion
from functools import partial
from .externals import six
from .externals.six.moves import copyreg
from gzip import GzipFile
###############################################################################
# Misc
class gzip_open(GzipFile):  # python2.6's GzipFile is not a context manager
def __init__(self, *args, **kwargs):
return GzipFile.__init__(self, *args, **kwargs)
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
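# Doctest-style example (works for both the stdlib Counter and the
# fallback above):
# >>> Counter('abracadabra').most_common()[0]
# ('a', 5)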
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
if LooseVersion(np.__version__) < LooseVersion('1.5'):
unique = _unique
else:
unique = np.unique
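# Doctest-style example of the `return_inverse` behavior this backport
# guarantees on old numpy:
# >>> vals, inv = unique(np.array([3, 1, 3, 2]), return_inverse=True)
# >>> vals
# array([1, 2, 3])
# >>> vals[inv]  # reconstructs the original array
# array([3, 1, 3, 2])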
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if LooseVersion(np.__version__) < LooseVersion('1.6'):
bincount = _bincount
else:
bincount = np.bincount
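# Doctest-style example of the `minlength` padding this backport adds:
# >>> bincount(np.array([0, 1, 1]), minlength=5)
# array([1, 2, 0, 0, 0])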
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
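# Doctest-style example (note the fallback uses np.sign, so a zero in x2
# zeroes the result instead of copying a sign bit):
# >>> copysign(np.array([1., 2.]), np.array([-1., 1.]))
# array([-1.,  2.])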
def _in1d(ar1, ar2, assume_unique=False, invert=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
in1d = _in1d
else:
in1d = np.in1d
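# Doctest-style example: elementwise membership of the first array in the
# second:
# >>> in1d(np.array([0, 1, 2, 5]), np.array([0, 2]))
# array([ True, False,  True, False], dtype=bool)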
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
digitize = _digitize
else:
digitize = np.digitize
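# Doctest-style example: with right=True each bin edge belongs to the bin
# on its left (integer inputs only in the backport):
# >>> digitize(np.array([1, 2, 3]), np.array([1, 2, 3]), right=True)
# array([0, 1, 2])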
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
if not hasattr(np, 'tril_indices'):
tril_indices = _tril_indices
else:
tril_indices = np.tril_indices
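# Doctest-style example: indices of the lower triangle of a 3 x 3 matrix:
# >>> rows, cols = tril_indices(3)
# >>> rows
# array([0, 1, 1, 2, 2, 2])
# >>> cols
# array([0, 0, 1, 0, 1, 2])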
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
if LooseVersion(np.__version__) < LooseVersion('1.4'):
unravel_index = _unravel_index
else:
unravel_index = np.unravel_index
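# Doctest-style example: the backport accepts several flat indices at once:
# >>> unravel_index([0, 5], (2, 3))
# (array([0, 1]), array([0, 2]))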
def _qr_economic_old(A, **kwargs):
"""
Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
with warnings.catch_warnings(record=True):
return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
return linalg.qr(A, mode='economic', **kwargs)
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
qr_economic = _qr_economic_old
else:
qr_economic = _qr_economic_new
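# Doctest-style example: economic mode returns reduced-size factors for a
# tall matrix:
# >>> A = np.random.randn(6, 3)
# >>> Q, R = qr_economic(A)
# >>> Q.shape, R.shape
# ((6, 3), (3, 3))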
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
if hasattr(np, 'count_nonzero'):
from numpy import count_nonzero
else:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
def _meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
meshgrid = _meshgrid
else:
meshgrid = np.meshgrid
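# Doctest-style example with the default 'xy' indexing (first two axes
# swapped relative to 'ij'):
# >>> X, Y = meshgrid(np.arange(3), np.arange(2))
# >>> X
# array([[0, 1, 2],
#        [0, 1, 2]])
# >>> Y
# array([[0, 0, 0],
#        [1, 1, 1]])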
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP
>>> print(taps[72:78]) # doctest: +SKIP
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def get_firwin2():
"""Helper to get firwin2"""
try:
from scipy.signal import firwin2
except ImportError:
firwin2 = _firwin2
return firwin2
def _filtfilt(*args, **kwargs):
"""wrap filtfilt, excluding padding arguments"""
from scipy.signal import filtfilt
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return filtfilt(*args, **kwargs)
def get_filtfilt():
"""Helper to get filtfilt from scipy"""
from scipy.signal import filtfilt
if 'padlen' in inspect.getargspec(filtfilt)[0]:
return filtfilt
return _filtfilt
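# Usage sketch; `b`, `a` and `x` are assumed to come from the caller,
# e.g. a filter designed with scipy.signal.butter:
#
# filtfilt = get_filtfilt()
# y = filtfilt(b, a, x)  # zero-phase filtering; on old scipy the wrapper
#                        # silently drops the unsupported padding arguments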
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
if LooseVersion(np.__version__) > '1.7.1':
from numpy.linalg import matrix_rank
else:
matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
copyreg.pickle(partial, _reduce_partial)
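# Doctest-style example of the pickling support registered above:
# >>> import pickle
# >>> p = partial(pow, 2)
# >>> pickle.loads(pickle.dumps(p))(5)
# 32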
def normalize_colors(vmin, vmax, clip=False):
"""Helper to handle matplotlib API"""
import matplotlib.pyplot as plt
try:
return plt.Normalize(vmin, vmax, clip=clip)
except AttributeError:
return plt.normalize(vmin, vmax, clip=clip)
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
try:
from scipy.sparse import block_diag as sparse_block_diag
except Exception:
sparse_block_diag = _sparse_block_diag
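# Doctest-style example: two blocks stacked block-diagonally give a
# (2 + 3) x (2 + 3) sparse matrix:
# >>> sparse_block_diag([np.eye(2), 2 * np.eye(3)]).shape
# (5, 5)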
def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False], dtype=bool)
>>> isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True], dtype=bool)
>>> isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True], dtype=bool)
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = np.core.multiarray.result_type(y, 1.)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond
if LooseVersion(np.__version__) < LooseVersion('1.7'):
isclose = _isclose
else:
isclose = np.isclose
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
glpk_Test.py
Created by Nikolaus Sonnenschein on 2008-02-12.
Copyright (c) 2008 Jacobs University of Bremen. All rights reserved.
"""
from ifba.glpki import glpki
import unittest
import util
import glpk
import sys
import copy
import random
import pickle
class test_glpk(unittest.TestCase):
def setUp(self):
self.lp = util.ImportCplex('test_data/model.lp')
self.glp = glpk.glpk(self.lp)
def testSimplex(self):
"""Tests if the real glpk simplex function spits out the correct
objective value for the iJR904 model under glucose minimal medium
condition."""
self.glp.simplex()
obj = glpk.glp_get_obj_val(self.glp.lp)
self.assertAlmostEqual(obj, 0.9259122)
def testSimplex2(self):
"""test if the python glpk method works correct."""
self.glp.simplex()
obj = glpk.glp_get_obj_val(self.glp.lp)
obj2 = self.glp.getObjVal()
self.assertAlmostEqual(obj, obj2)
def testModifyColumnBounds(self):
"""Tests if the modifiyBounds method operates correctly and if the
modifications are reversible using the glpk history."""
self.glp.modifyColumnBounds({1 : (0, 0.5)})
self.glp.simplex()
obj = self.glp.getObjVal()
self.assertAlmostEqual(obj, 0.5)
self.glp.undo()
self.glp.simplex()
obj = self.glp.getObjVal()
self.assertAlmostEqual(obj, 0.9259122)
def testModifyRowBounds(self):
"""Tests if the modifiyBounds method operates correctly and if the
modifications are reversible using the glpk history."""
self.glp.modifyRowBounds({1 : (0, 0.5)})
rowType = glpki.glp_get_row_type(self.glp.lp, 1)
rowUb = glpki.glp_get_row_ub(self.glp.lp, 1)
rowLb = glpki.glp_get_row_lb(self.glp.lp, 1)
self.assertEqual(self.glp.getRowBounds()[1], (rowLb, rowUb))
self.assertEqual(rowType, glpki.GLP_DB)
self.assertEqual(self.glp.getRowBounds()[1], (0, 0.5))
self.glp.undo()
rowType = glpki.glp_get_row_type(self.glp.lp, 1)
rowUb = glpki.glp_get_row_ub(self.glp.lp, 1)
rowLb = glpki.glp_get_row_lb(self.glp.lp, 1)
self.assertEqual(rowType, glpki.GLP_FX)
self.assertEqual(self.glp.getRowBounds()[1], (0, 0.))
def testPickle(self):
"""Tests pickleability of glpk objects."""
pickledLP = pickle.dumps(self.glp)
unpickledLP = pickle.loads(pickledLP)
self.glp.simplex()
unpickledLP.simplex()
self.assertAlmostEqual(self.glp.getObjVal(), unpickledLP.getObjVal())
def testCopy(self):
"""Tests if the the magic __copy__ methods is used correctly by the
copy module. It tests specifically if the orginal and copied lp reside
on different memory locations."""
lpcopy = copy.copy(self.glp)
self.assertNotEqual(lpcopy, self.glp)
self.assertNotEqual(lpcopy.lp, self.glp.lp)
def testPresolOnOff(self):
"""Tests if one can savely shutdown the lp presolver. There are
floating point precision differences between GLP_ON and GLP_OFF. So
assertAlmostEqual is used to check if they really are only
max precision errors"""
# num = self.glp.getNumCols() # TODO someday you'll have to change this
num = 10
def func(float):
if float < 0.:
return 0.
else:
return float
for i in range(1, num + 1):
self.glp.smcp.presolve = glpk.GLP_ON
self.glp.modifyColumnBounds({i : (0., 0.)})
self.glp.simplex()
res_on = func(self.glp.getObjVal())
self.glp.undo()
self.glp.smcp.presolve = glpk.GLP_OFF
self.glp.modifyColumnBounds({i : (0., 0.)})
self.glp.simplex()
res_off = func(self.glp.getObjVal())
self.glp.undo()
self.assertAlmostEqual(res_on, res_off)
def testInitialize(self):
"""Tests if initialize returns the same clean lp object as a series
of consecutive undo calls."""
sample = random.sample(range(1, self.glp.getNumCols() + 1), 69)
rndReal = random.uniform
for i in sample:
real1 = rndReal(20., 100.)
self.glp.modifyColumnBounds({i:(0., real1)})
self.glp.initialize()
# self.glp.undo()
# self.glp.undo()
self.glp.simplex()
obj = self.glp.getObjVal()
self.assertAlmostEqual(obj, 0.9259122)
def testObjectiveFunctionality(self):
"""Tests if the getter and setter function for the lp objective work
and if they really are reversible."""
initObjList = self.glp.getObjective()
self.glp.setObjective({2 : 1.})
self.assertNotEqual(self.glp.getObjective(), initObjList)
self.glp.initialize()
self.assertEqual(self.glp.getObjective(), initObjList)
def testSetOptFlag(self):
"""Tests if the getter and setter functions for optimization direction
work and if they are reversible."""
flag = self.glp.getOptFlag()
self.assertEqual(flag, 2)
self.assertEqual(flag, glpk.GLP_MAX)
self.glp.setOptFlag('MiN')
flag = self.glp.getOptFlag()
self.assertEqual(flag, glpk.GLP_MIN)
self.glp.initialize()
flag = self.glp.getOptFlag()
self.assertEqual(flag, glpk.GLP_MAX)
def testGetBounds(self):
"""Tests if the getColumnBounds method return a correct dictionary of column
bounds."""
# boundsDict = self.glp.getColumnBounds()
# self.assertEqual(boundsDict)
pass
def testDeleteColumns(self):
"""Tests if a the specified column is deleted from the constraint
matrix"""
self.assertEqual(self.glp.getNumCols(), 1473)
colCoef = self.glp.getColumnCoef(66)
self.glp.deleteColumns([66])
self.assertEqual(self.glp.getNumCols(), 1472)
# now we check if this can be undone
self.glp.undo()
self.assertEqual(self.glp.getColumnCoef(1473), colCoef)
self.assertEqual(self.glp.getNumCols(), 1473)
def testDeleteRows(self):
"""Tests if a the specified row is deleted from the constraint
matrix"""
self.assertEqual(self.glp.getNumRows(), 904)
rowCoef = self.glp.getRowCoef(800)
self.glp.deleteRows([800])
self.assertEqual(self.glp.getNumRows(), 903)
# now we check if this can be undone
self.glp.undo()
self.assertEqual(self.glp.getNumRows(), 904)
self.assertEqual(self.glp.getRowCoef(904), rowCoef)
def testAddColumns(self):
"""Tests if a the specified column is appended to the constraint
matrix"""
self.assertEqual(self.glp.getNumCols(), 1473)
newColumArray = self.glp.getColumnCoef(1)
self.glp.addColumns({'R("R_HansWurs")': (0., 99999., newColumArray)})
self.assertEqual(self.glp.getNumCols(), 1474)
self.assertEqual(self.glp.getColumnCoef(1), self.glp.getColumnCoef(1474))
# now we check if this can be undone
self.glp.undo()
self.assertEqual(self.glp.getNumCols(), 1473)
self.assertEqual(len(self.glp.history), 0)
def testAddRows(self):
"""Tests if a the specified column is appended to the constraint
matrix"""
self.assertEqual(self.glp.getNumRows(), 904)
newColumArray = self.glp.getRowCoef(1)
self.glp.addRows({'Mwurstb': (0., 99999., newColumArray)})
self.assertEqual(self.glp.getNumRows(), 905)
self.assertEqual(self.glp.getRowCoef(1), self.glp.getRowCoef(905))
# now we check if this can be undone
self.glp.undo()
self.assertEqual(self.glp.getNumRows(), 904)
self.assertEqual(len(self.glp.history), 0)
def testSetColumnKinds(self):
"""Tests the setColumnKinds command functionality and if it is undoable"""
# Check if setColumnKinds works for already available reactions
self.assertEqual(set(self.glp.getColumnKinds().values()), set([1]))
for i in range(1, self.glp.getNumCols() + 1, 50):
colBounds = self.glp.getColumnBounds()[i]
self.glp.setColumnKinds({i:glpki.GLP_BV})
self.assertEqual(self.glp.getColumnKinds([i]),{i:glpki.GLP_BV})
self.assertEqual(self.glp.getColumnBounds()[i], (0., 1.))
self.glp.initialize()
self.assertEqual(self.glp.getColumnKinds([i]),{i:glpki.GLP_CV})
self.assertEqual(self.glp.getColumnBounds()[i], colBounds)
self.glp.setColumnKinds({i:glpki.GLP_IV})
self.assertEqual(self.glp.getColumnKinds([i]),{i:glpki.GLP_IV})
self.assertEqual(self.glp.getColumnBounds()[i], colBounds)
self.glp.initialize()
self.assertEqual(self.glp.getColumnKinds([i]),{i:glpki.GLP_CV})
self.assertEqual(self.glp.getColumnBounds()[i], colBounds)
def testCheckIndexValidities(self):
"""Check if an IndexError is raised if an column or row index is out
of range."""
self.assertRaises(IndexError, self.glp._setColumnBound, 1474, 0., 0.)
self.assertRaises(IndexError, self.glp._setColumnBound, 0, 0., 0.)
self.assertRaises(IndexError, self.glp._setRowBound, 905, 0., 0.)
self.assertRaises(IndexError, self.glp._setRowBound, 0, 0., 0.)
self.assertRaises(IndexError, self.glp.translateColumnIndices, [0])
self.assertRaises(IndexError, self.glp.translateColumnIndices, [1474])
self.assertRaises(IndexError, self.glp.translateRowIndices, [0])
self.assertRaises(IndexError, self.glp.translateRowIndices, [905])
def testTranslateRowColumnNames(self):
"""docstring for testTranslateRowColumnNames"""
self.assertEqual(self.glp.translateColumnNames(['R("R_BiomassEcoli")',
'R("R_XYLI1_Rev")']), [1,1473])
self.assertRaises(Exception, self.glp.translateColumnNames, ['R("R_Stub")'])
self.assertEqual(self.glp.translateRowNames(['Matpc', 'MglcDb']), [223, 430])
self.assertRaises(Exception, self.glp.translateRowNames, ['Mstubc'])
class test_sparseList(unittest.TestCase):
def setUp(self):
self.spL = glpk.sparseList([1, -3, 0, 0, -6., 0, 3, 0, 0, 0, 0., 3.])
def testIt(self):
self.assertEqual(self.spL, {1:1,2:-3,5:-6.,7:3,12:3.})
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(test_glpk)
unittest.TextTestRunner(verbosity=6).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(test_sparseList)
unittest.TextTestRunner(verbosity=6).run(suite)
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import socket
import uuid
from babel import localedata
import mock
import webob
from keystone.common import environment
from keystone.common import wsgi
from keystone import exception
from keystone.openstack.common.fixture import moxstubout
from keystone.openstack.common import gettextutils
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import jsonutils
from keystone import tests
class FakeApp(wsgi.Application):
def index(self, context):
return {'a': 'b'}
class BaseWSGITest(tests.TestCase):
def setUp(self):
self.app = FakeApp()
super(BaseWSGITest, self).setUp()
def _make_request(self, url='/'):
req = webob.Request.blank(url)
args = {'action': 'index', 'controller': None}
req.environ['wsgiorg.routing_args'] = [None, args]
return req
class ApplicationTest(BaseWSGITest):
def test_response_content_type(self):
req = self._make_request()
resp = req.get_response(self.app)
self.assertEqual(resp.content_type, 'application/json')
def test_query_string_available(self):
class FakeApp(wsgi.Application):
def index(self, context):
return context['query_string']
req = self._make_request(url='/?1=2')
resp = req.get_response(FakeApp())
self.assertEqual(jsonutils.loads(resp.body), {'1': '2'})
def test_headers_available(self):
class FakeApp(wsgi.Application):
def index(self, context):
return context['headers']
app = FakeApp()
req = self._make_request(url='/?1=2')
req.headers['X-Foo'] = "bar"
resp = req.get_response(app)
self.assertIn('X-Foo', jsonutils.loads(resp.body))
def test_render_response(self):
data = {'attribute': 'value'}
body = '{"attribute": "value"}'
resp = wsgi.render_response(body=data)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(resp.headers.get('Vary'), 'X-Auth-Token')
self.assertEqual(resp.headers.get('Content-Length'), str(len(body)))
def test_render_response_custom_status(self):
resp = wsgi.render_response(status=(501, 'Not Implemented'))
self.assertEqual(resp.status, '501 Not Implemented')
self.assertEqual(resp.status_int, 501)
def test_render_response_custom_headers(self):
resp = wsgi.render_response(headers=[('Custom-Header', 'Some-Value')])
self.assertEqual(resp.headers.get('Custom-Header'), 'Some-Value')
self.assertEqual(resp.headers.get('Vary'), 'X-Auth-Token')
def test_render_response_no_body(self):
resp = wsgi.render_response()
self.assertEqual(resp.status, '204 No Content')
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.body, '')
self.assertEqual(resp.headers.get('Content-Length'), '0')
self.assertEqual(resp.headers.get('Content-Type'), None)
def test_render_response_head_with_body(self):
resp = wsgi.render_response({'id': uuid.uuid4().hex}, method='HEAD')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'')
self.assertNotEqual(resp.headers.get('Content-Length'), '0')
self.assertEqual(resp.headers.get('Content-Type'), 'application/json')
def test_application_local_config(self):
class FakeApp(wsgi.Application):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
app = FakeApp.factory({}, testkey="test")
self.assertIn("testkey", app.kwargs)
self.assertEqual("test", app.kwargs["testkey"])
def test_render_exception(self):
e = exception.Unauthorized(message=u'\u7f51\u7edc')
resp = wsgi.render_exception(e)
self.assertEqual(resp.status_int, 401)
def test_render_exception_host(self):
e = exception.Unauthorized(message=u'\u7f51\u7edc')
context = {'host_url': 'http://%s:5000' % uuid.uuid4().hex}
resp = wsgi.render_exception(e, context=context)
self.assertEqual(resp.status_int, 401)
class ExtensionRouterTest(BaseWSGITest):
def test_extensionrouter_local_config(self):
class FakeRouter(wsgi.ExtensionRouter):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
factory = FakeRouter.factory({}, testkey="test")
app = factory(self.app)
self.assertIn("testkey", app.kwargs)
self.assertEqual("test", app.kwargs["testkey"])
class MiddlewareTest(BaseWSGITest):
def test_middleware_request(self):
class FakeMiddleware(wsgi.Middleware):
def process_request(self, req):
req.environ['fake_request'] = True
return req
req = self._make_request()
resp = FakeMiddleware(None)(req)
self.assertIn('fake_request', resp.environ)
def test_middleware_response(self):
class FakeMiddleware(wsgi.Middleware):
def process_response(self, request, response):
response.environ = {}
response.environ['fake_response'] = True
return response
req = self._make_request()
resp = FakeMiddleware(self.app)(req)
self.assertIn('fake_response', resp.environ)
def test_middleware_bad_request(self):
class FakeMiddleware(wsgi.Middleware):
def process_response(self, request, response):
raise exception.Unauthorized()
req = self._make_request()
req.environ['REMOTE_ADDR'] = '127.0.0.1'
resp = FakeMiddleware(self.app)(req)
self.assertEqual(resp.status_int, exception.Unauthorized.code)
def test_middleware_type_error(self):
class FakeMiddleware(wsgi.Middleware):
def process_response(self, request, response):
raise TypeError()
req = self._make_request()
req.environ['REMOTE_ADDR'] = '127.0.0.1'
resp = FakeMiddleware(self.app)(req)
# A TypeError in middleware is rendered as a ValidationError response
self.assertEqual(resp.status_int, exception.ValidationError.code)
def test_middleware_exception_error(self):
exception_str = 'EXCEPTIONERROR'
class FakeMiddleware(wsgi.Middleware):
def process_response(self, request, response):
raise exception.UnexpectedError(exception_str)
def do_request():
req = self._make_request()
resp = FakeMiddleware(self.app)(req)
self.assertEqual(resp.status_int, exception.UnexpectedError.code)
return resp
# Exception data should not be in the message when debug is False
self.config_fixture.config(debug=False)
self.assertNotIn(exception_str, do_request().body)
# Exception data should be in the message when debug is True
self.config_fixture.config(debug=True)
self.assertIn(exception_str, do_request().body)
def test_middleware_local_config(self):
class FakeMiddleware(wsgi.Middleware):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
factory = FakeMiddleware.factory({}, testkey="test")
app = factory(self.app)
self.assertIn("testkey", app.kwargs)
self.assertEqual("test", app.kwargs["testkey"])
class LocalizedResponseTest(tests.TestCase):
def setUp(self):
super(LocalizedResponseTest, self).setUp()
gettextutils._AVAILABLE_LANGUAGES.clear()
self.addCleanup(gettextutils._AVAILABLE_LANGUAGES.clear)
fixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = fixture.stubs
def _set_expected_languages(self, all_locales=[], avail_locales=None):
# Override localedata.locale_identifiers to return some locales.
def returns_some_locales(*args, **kwargs):
return all_locales
self.stubs.Set(localedata, 'locale_identifiers', returns_some_locales)
# Override gettext.find to return other than None for some languages.
def fake_gettext_find(lang_id, *args, **kwargs):
found_ret = '/keystone/%s/LC_MESSAGES/keystone.mo' % lang_id
if avail_locales is None:
# All locales are available.
return found_ret
languages = kwargs['languages']
if languages[0] in avail_locales:
return found_ret
return None
self.stubs.Set(gettext, 'find', fake_gettext_find)
def test_request_match_default(self):
# The default language if no Accept-Language is provided is None
req = webob.Request.blank('/')
self.assertIsNone(wsgi.best_match_language(req))
def test_request_match_language_expected(self):
# If Accept-Language is a supported language, best_match_language()
# returns it.
self._set_expected_languages(all_locales=['it'])
req = webob.Request.blank('/', headers={'Accept-Language': 'it'})
self.assertEqual(wsgi.best_match_language(req), 'it')
def test_request_match_language_unexpected(self):
# If Accept-Language is a language we do not support,
# best_match_language() returns None.
self._set_expected_languages(all_locales=['it'])
req = webob.Request.blank('/', headers={'Accept-Language': 'zh'})
self.assertIsNone(wsgi.best_match_language(req))
def test_static_translated_string_is_Message(self):
# Statically created message strings are Message objects so that they
# are lazy-translated.
self.assertIsInstance(exception.Unauthorized.message_format,
gettextutils.Message)
def test_dynamic_translated_string_is_Message(self):
# Dynamically created message strings are Message objects so that they
# are lazy-translated.
self.assertIsInstance(_('The resource could not be found.'),
gettextutils.Message)
class ServerTest(tests.TestCase):
def setUp(self):
super(ServerTest, self).setUp()
environment.use_eventlet()
self.host = '127.0.0.1'
self.port = '1234'
@mock.patch('eventlet.listen')
@mock.patch('socket.getaddrinfo')
def test_keepalive_unset(self, mock_getaddrinfo, mock_listen):
mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
mock_sock = mock.Mock()
mock_sock.setsockopt = mock.Mock()
mock_listen.return_value = mock_sock
server = environment.Server(mock.MagicMock(), host=self.host,
port=self.port)
server.start()
self.assertTrue(mock_listen.called)
self.assertFalse(mock_sock.setsockopt.called)
@mock.patch('eventlet.listen')
@mock.patch('socket.getaddrinfo')
def test_keepalive_set(self, mock_getaddrinfo, mock_listen):
mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
mock_sock = mock.Mock()
mock_sock.setsockopt = mock.Mock()
mock_listen.return_value = mock_sock
server = environment.Server(mock.MagicMock(), host=self.host,
port=self.port, keepalive=True)
server.start()
mock_sock.setsockopt.assert_called_once_with(socket.SOL_SOCKET,
socket.SO_KEEPALIVE,
1)
self.assertTrue(mock_listen.called)
@mock.patch('eventlet.listen')
@mock.patch('socket.getaddrinfo')
def test_keepalive_and_keepidle_set(self, mock_getaddrinfo, mock_listen):
mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
mock_sock = mock.Mock()
mock_sock.setsockopt = mock.Mock()
mock_listen.return_value = mock_sock
server = environment.Server(mock.MagicMock(), host=self.host,
port=self.port, keepalive=True,
keepidle=1)
server.start()
# keepidle isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
self.assertEqual(mock_sock.setsockopt.call_count, 2)
# Test the last set of call args i.e. for the keepidle
mock_sock.setsockopt.assert_called_with(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
1)
else:
self.assertEqual(mock_sock.setsockopt.call_count, 1)
self.assertTrue(mock_listen.called)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from affine import Affine
from shapely.geometry import shape
import numpy as np
import numpy.distutils.system_info as sysinfo
import warnings
from .io import read_features, Raster
from .utils import (rasterize_geom, get_percentile, check_stats,
remap_categories, key_assoc_val, boxify_points)
def raster_stats(*args, **kwargs):
"""Deprecated. Use zonal_stats instead."""
warnings.warn("'raster_stats' is an alias to 'zonal_stats'"
" and will disappear in 1.0", DeprecationWarning)
return zonal_stats(*args, **kwargs)
def zonal_stats(*args, **kwargs):
"""The primary zonal statistics entry point.
All arguments are passed directly to ``gen_zonal_stats``.
See its docstring for details.
The only difference is that ``zonal_stats`` will
return a list rather than a generator."""
return list(gen_zonal_stats(*args, **kwargs))
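# Usage sketch (the paths are hypothetical; any fiona-readable vector
# source and GDAL-readable raster will do):
# >>> stats = zonal_stats('polygons.shp', 'elevation.tif',
# ...                     stats=['min', 'max', 'mean', 'count'])
# >>> sorted(stats[0].keys())
# ['count', 'max', 'mean', 'min']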
def gen_zonal_stats(
vectors, raster,
layer=0,
band=1,
nodata=None,
affine=None,
stats=None,
all_touched=False,
categorical=False,
category_map=None,
add_stats=None,
zone_func=None,
raster_out=False,
prefix=None,
geojson_out=False, **kwargs):
"""Zonal statistics of raster values aggregated to vector geometries.
Parameters
----------
vectors: path to a vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
If `vectors` is a path to a fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
all_touched: bool, optional
Whether to include every raster cell touched by a geometry, or only
those having a center point within the polygon.
defaults to `False`
    categorical: bool, optional
        If True, include counts of each unique pixel value within the zone
        (keyed by value, see `category_map`) alongside any requested stats.
        defaults to `False`
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
    add_stats: dict, optional
        Dict of name -> function for additional stats to compute.
        Each function is called with the masked zone array and, if it accepts
        a second argument, the feature's properties.
    zone_func: callable, optional
        function applied to the masked zone ndarray prior to computing stats;
        expected to modify the array in place (its return value is ignored)
    raster_out: boolean, optional
        Include the masked numpy array for each feature.
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
Returns
-------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
GeoJSON-like Feature as python dict
"""
stats, run_count = check_stats(stats, categorical)
# Handle 1.0 deprecations
transform = kwargs.get('transform')
if transform:
warnings.warn("GDAL-style transforms will disappear in 1.0. "
"Use affine=Affine.from_gdal(*transform) instead",
DeprecationWarning)
if not affine:
affine = Affine.from_gdal(*transform)
cp = kwargs.get('copy_properties')
if cp:
warnings.warn("Use `geojson_out` to preserve feature properties",
DeprecationWarning)
band_num = kwargs.get('band_num')
if band_num:
warnings.warn("Use `band` to specify band number", DeprecationWarning)
band = band_num
with Raster(raster, affine, nodata, band) as rast:
features_iter = read_features(vectors, layer)
        for feat in features_iter:
geom = shape(feat['geometry'])
if 'Point' in geom.type:
geom = boxify_points(geom, rast)
geom_bounds = tuple(geom.bounds)
fsrc = rast.read(bounds=geom_bounds)
# rasterized geometry
rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)
# nodata mask
isnodata = (fsrc.array == fsrc.nodata)
# add nan mask (if necessary)
has_nan = (
np.issubdtype(fsrc.array.dtype, np.floating)
and np.isnan(fsrc.array.min()))
if has_nan:
isnodata = (isnodata | np.isnan(fsrc.array))
# Mask the source data array
# mask everything that is not a valid value or not within our geom
masked = np.ma.MaskedArray(
fsrc.array,
mask=(isnodata | ~rv_array))
# If we're on 64 bit platform and the array is an integer type
# make sure we cast to 64 bit to avoid overflow.
# workaround for https://github.com/numpy/numpy/issues/8433
if sysinfo.platform_bits == 64 and \
masked.dtype != np.int64 and \
issubclass(masked.dtype.type, np.integer):
masked = masked.astype(np.int64)
# execute zone_func on masked zone ndarray
if zone_func is not None:
if not callable(zone_func):
                    raise TypeError('zone_func must be a callable '
                                    'which accepts a single '
                                    '`zone_array` arg.')
zone_func(masked)
if masked.compressed().size == 0:
# nothing here, fill with None and move on
                feature_stats = {stat: None for stat in stats}
if 'count' in stats: # special case, zero makes sense here
feature_stats['count'] = 0
else:
if run_count:
keys, counts = np.unique(masked.compressed(), return_counts=True)
try:
pixel_count = dict(zip([k.item() for k in keys],
[c.item() for c in counts]))
except AttributeError:
pixel_count = dict(zip([np.asscalar(k) for k in keys],
[np.asscalar(c) for c in counts]))
if categorical:
feature_stats = dict(pixel_count)
if category_map:
feature_stats = remap_categories(category_map, feature_stats)
else:
feature_stats = {}
if 'min' in stats:
feature_stats['min'] = float(masked.min())
if 'max' in stats:
feature_stats['max'] = float(masked.max())
if 'mean' in stats:
feature_stats['mean'] = float(masked.mean())
if 'count' in stats:
feature_stats['count'] = int(masked.count())
# optional
if 'sum' in stats:
feature_stats['sum'] = float(masked.sum())
if 'std' in stats:
feature_stats['std'] = float(masked.std())
if 'median' in stats:
feature_stats['median'] = float(np.median(masked.compressed()))
if 'majority' in stats:
feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
if 'minority' in stats:
feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
if 'unique' in stats:
feature_stats['unique'] = len(list(pixel_count.keys()))
if 'range' in stats:
try:
rmin = feature_stats['min']
except KeyError:
rmin = float(masked.min())
try:
rmax = feature_stats['max']
except KeyError:
rmax = float(masked.max())
feature_stats['range'] = rmax - rmin
for pctile in [s for s in stats if s.startswith('percentile_')]:
q = get_percentile(pctile)
pctarr = masked.compressed()
feature_stats[pctile] = np.percentile(pctarr, q)
if 'nodata' in stats or 'nan' in stats:
featmasked = np.ma.MaskedArray(fsrc.array, mask=(~rv_array))
if 'nodata' in stats:
feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
if 'nan' in stats:
feature_stats['nan'] = float(np.isnan(featmasked).sum()) if has_nan else 0
if add_stats is not None:
for stat_name, stat_func in add_stats.items():
try:
feature_stats[stat_name] = stat_func(masked, feat['properties'])
except TypeError:
# backwards compatible with single-argument function
feature_stats[stat_name] = stat_func(masked)
if raster_out:
feature_stats['mini_raster_array'] = masked
feature_stats['mini_raster_affine'] = fsrc.affine
feature_stats['mini_raster_nodata'] = fsrc.nodata
if prefix is not None:
prefixed_feature_stats = {}
for key, val in feature_stats.items():
newkey = "{}{}".format(prefix, key)
prefixed_feature_stats[newkey] = val
feature_stats = prefixed_feature_stats
if geojson_out:
for key, val in feature_stats.items():
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][key] = val
yield feat
else:
yield feature_stats
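# A hedged usage sketch (demo helper, not executed on import). Per the
# docstring, passing an ndarray requires the `affine` kwarg; the array,
# transform, and polygon below are illustrative assumptions.
def _demo_gen_zonal_stats():
    arr = np.arange(36, dtype=float).reshape(6, 6)
    # 1-unit square pixels with the origin at the top-left corner
    transform = Affine(1.0, 0.0, 0.0, 0.0, -1.0, 6.0)
    square = {"type": "Polygon",
              "coordinates": [[(0.0, 3.0), (3.0, 3.0), (3.0, 6.0),
                               (0.0, 6.0), (0.0, 3.0)]]}
    return list(gen_zonal_stats([square], arr, affine=transform,
                                stats=["min", "max", "mean", "count"]))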
|
|
from collections import deque
import operator
import unittest
from transducer.eager import transduce
from transducer.functional import compose
from transducer.infrastructure import Transducer
from transducer.reducers import appending, expecting_single, conjoining, adding
from transducer.transducers import (mapping, filtering, reducing, enumerating, first, last,
reversing, ordering, counting, scanning, taking, dropping_while, distinct,
taking_while, dropping, element_at, mapcatting, pairwise, batching, windowing,
repeating)
class TestSingleTransducers(unittest.TestCase):
def test_identity(self):
result = transduce(transducer=Transducer,
reducer=appending(),
iterable=range(5))
self.assertListEqual(result, [0, 1, 2, 3, 4])
def test_mapping(self):
result = transduce(transducer=mapping(lambda x: x*x),
reducer=appending(),
iterable=range(5))
self.assertListEqual(result, [0, 1, 4, 9, 16])
def test_filtering(self):
result = transduce(transducer=filtering(lambda w: 'x' in w),
reducer=appending(),
iterable='socks in box fox on clocks'.split())
self.assertListEqual(result, ['box', 'fox'])
def test_reducing(self):
result = transduce(transducer=reducing(operator.add),
reducer=expecting_single(),
iterable=range(10))
self.assertEqual(result, 45)
def test_reducing_with_init(self):
result = transduce(transducer=reducing(operator.add, 10),
reducer=expecting_single(),
iterable=range(10))
self.assertEqual(result, 55)
def test_scanning(self):
result = transduce(transducer=scanning(operator.add),
reducer=appending(),
iterable=range(5))
self.assertListEqual(result, [0, 1, 3, 6, 10])
def test_scanning_with_init(self):
result = transduce(transducer=scanning(operator.add, 3),
reducer=appending(),
iterable=range(5))
self.assertListEqual(result, [3, 4, 6, 9, 13])
def test_enumerating(self):
result = transduce(transducer=enumerating(),
reducer=appending(),
iterable=[2, 4, 6, 8, 10])
self.assertListEqual(result, [(0, 2), (1, 4), (2, 6), (3, 8), (4, 10)])
def test_enumerating_with_start(self):
result = transduce(transducer=enumerating(start=3),
reducer=appending(),
iterable=[2, 4, 6, 8, 10])
self.assertListEqual(result, [(3, 2), (4, 4), (5, 6), (6, 8), (7, 10)])
def test_mapcatting(self):
result = transduce(transducer=mapcatting(list),
reducer=appending(),
iterable=['new', 'found', 'land'])
self.assertListEqual(result, list("newfoundland"))
def test_taking(self):
result = transduce(transducer=taking(3),
reducer=appending(),
iterable=[2, 4, 5, 8, 10])
self.assertListEqual(result, [2, 4, 5])
def test_taking_validation(self):
with self.assertRaises(ValueError):
transduce(transducer=taking(-3),
reducer=appending(),
iterable=[2, 4, 5, 8, 10])
def test_taking_while(self):
result = transduce(transducer=taking_while(lambda x: x < 6),
reducer=appending(),
iterable=[2, 4, 5, 8, 10])
self.assertListEqual(result, [2, 4, 5])
def test_dropping(self):
result = transduce(transducer=dropping(3),
reducer=appending(),
iterable=[2, 4, 5, 8, 10])
self.assertListEqual(result, [8, 10])
def test_dropping_validation(self):
with self.assertRaises(ValueError):
transduce(transducer=dropping(-3),
reducer=appending(),
iterable=[2, 4, 5, 8, 10])
def test_dropping_while(self):
result = transduce(transducer=dropping_while(lambda x: x < 6),
reducer=appending(),
iterable=[2, 4, 5, 8, 10])
self.assertListEqual(result, [8, 10])
def test_distinct(self):
result = transduce(transducer=distinct(),
reducer=appending(),
iterable=[1, 1, 3, 5, 5, 2, 1, 2])
self.assertListEqual(result, [1, 3, 5, 2])
def test_pairwise_at_least_two(self):
result = transduce(transducer=pairwise(),
reducer=appending(),
iterable=[1, 3, 5, 7, 2, 1, 9])
self.assertListEqual(result, [(1, 3), (3, 5), (5, 7), (7, 2), (2, 1), (1, 9)])
def test_pairwise_single(self):
"""A single item fed into pairwise is discarded."""
result = transduce(transducer=pairwise(),
reducer=appending(),
iterable=[42])
self.assertListEqual(result, [])
def test_batching_exact(self):
result = transduce(transducer=batching(3),
reducer=appending(),
iterable=[42, 12, 45, 9, 18, 3, 34, 13, 12])
self.assertListEqual(result, [[42, 12, 45], [9, 18, 3], [34, 13, 12]])
def test_batching_inexact_1(self):
result = transduce(transducer=batching(3),
reducer=appending(),
iterable=[42, 12, 45, 9, 18, 3, 34])
self.assertListEqual(result, [[42, 12, 45], [9, 18, 3], [34]])
def test_batching_inexact_2(self):
result = transduce(transducer=batching(3),
reducer=appending(),
iterable=[42, 12, 45, 9, 18, 3, 34, 13])
self.assertListEqual(result, [[42, 12, 45], [9, 18, 3], [34, 13]])
def test_batching_validation(self):
with self.assertRaises(ValueError):
transduce(transducer=batching(0),
reducer=appending(),
iterable=[42, 12, 45, 9, 18, 3, 34, 13])
def test_windowing_no_padding(self):
result = transduce(transducer=windowing(3, window_type=list),
reducer=appending(),
iterable=[42, 12, 45, 9, 18, 3, 34, 13])
self.assertListEqual(result,
[[42],
[42, 12],
[42, 12, 45],
[12, 45, 9],
[45, 9, 18],
[9, 18, 3],
[18, 3, 34],
[3, 34, 13],
[34, 13],
[13]])
def test_windowing_padding(self):
result = transduce(transducer=windowing(3, padding=0, window_type=list),
reducer=appending(),
iterable=[42, 12, 45, 9, 18, 3, 34, 13])
self.assertListEqual(result,
[[0, 0, 42],
[0, 42, 12],
[42, 12, 45],
[12, 45, 9],
[45, 9, 18],
[9, 18, 3],
[18, 3, 34],
[3, 34, 13],
[34, 13, 0],
[13, 0, 0]])
def test_windowing_validation(self):
with self.assertRaises(ValueError):
transduce(transducer=windowing(0),
reducer=appending(),
iterable=[42, 12, 45, 9, 18, 3, 34, 13])
def test_element_at(self):
result = transduce(transducer=element_at(3),
reducer=expecting_single(),
iterable=[1, 3, 5, 7, 9])
self.assertEqual(result, 7)
def test_element_at_validation(self):
with self.assertRaises(IndexError):
transduce(transducer=element_at(-1),
reducer=expecting_single(),
iterable=[1, 3, 5, 7, 9])
def test_element_at_too_short(self):
with self.assertRaises(IndexError):
transduce(transducer=element_at(3),
reducer=expecting_single(),
iterable=[1, 3, 5])
def test_repeating(self):
result = transduce(transducer=repeating(3),
reducer=appending(),
iterable=[1, 3, 5])
self.assertListEqual(result, [1, 1, 1, 3, 3, 3, 5, 5, 5])
def test_repeating_zero(self):
result = transduce(transducer=repeating(0),
reducer=appending(),
iterable=[1, 3, 5])
self.assertListEqual(result, [])
def test_repeating_validation(self):
with self.assertRaises(ValueError):
transduce(transducer=repeating(-1),
reducer=appending(),
iterable=[1, 3, 5])
def test_first(self):
result = transduce(transducer=first(),
reducer=expecting_single(),
iterable=[2, 4, 6, 8, 10])
self.assertEqual(result, 2)
def test_first_with_predicate(self):
result = transduce(transducer=first(lambda x: x > 5),
reducer=expecting_single(),
iterable=[2, 4, 6, 8, 10])
self.assertEqual(result, 6)
def test_last(self):
result = transduce(transducer=last(),
reducer=expecting_single(),
iterable=[2, 4, 6, 8, 10])
self.assertEqual(result, 10)
def test_last_with_predicate(self):
result = transduce(transducer=last(lambda x: x < 7),
reducer=expecting_single(),
iterable=[2, 4, 6, 8, 10])
self.assertEqual(result, 6)
def test_reversing(self):
result = transduce(transducer=reversing(),
reducer=appending(),
iterable=[2, 4, 6, 8, 10])
self.assertSequenceEqual(result, [10, 8, 6, 4, 2])
def test_reversing_preserves_mutable_sequence_type(self):
result = transduce(transducer=reversing(),
reducer=appending(),
iterable=[2, 4, 6, 8, 10])
self.assertIsInstance(result, list)
self.assertSequenceEqual(result, [10, 8, 6, 4, 2])
def test_ordering(self):
result = transduce(transducer=ordering(),
reducer=appending(),
iterable=[4, 2, 6, 10, 8])
self.assertSequenceEqual(result, [2, 4, 6, 8, 10])
def test_ordering_preserves_mutable_sequence_type(self):
result = transduce(transducer=ordering(),
reducer=appending(),
iterable=[4, 2, 6, 10, 8],
init=deque())
self.assertIsInstance(result, deque)
self.assertSequenceEqual(result, deque([2, 4, 6, 8, 10]))
def test_ordering_preserves_immutable_sequence_type(self):
result = transduce(transducer=ordering(),
reducer=conjoining(),
iterable=[4, 2, 6, 10, 8])
self.assertIsInstance(result, tuple)
self.assertSequenceEqual(result, (2, 4, 6, 8, 10))
def test_ordering_reverse(self):
result = transduce(transducer=ordering(reverse=True),
reducer=appending(),
iterable=[4, 2, 6, 10, 8])
self.assertSequenceEqual(result, [10, 8, 6, 4, 2])
def test_ordering_with_key(self):
result = transduce(transducer=ordering(key=lambda x: len(x)),
reducer=appending(),
iterable="The quick brown fox jumped".split())
self.assertSequenceEqual(result, ['The', 'fox', 'quick', 'brown', 'jumped'])
def test_ordering_reverse_with_key(self):
result = transduce(transducer=ordering(key=lambda x: len(x), reverse=True),
reducer=appending(),
iterable="The quick brown fox jumped".split())
self.assertSequenceEqual(result, ['jumped', 'quick', 'brown', 'The', 'fox'])
def test_counting(self):
result = transduce(transducer=counting(),
reducer=expecting_single(),
iterable="The quick brown fox jumped".split())
self.assertEqual(result, 5)
def test_counting_with_predicate(self):
result = transduce(transducer=counting(lambda w: 'o' in w),
reducer=expecting_single(),
iterable="The quick brown fox jumped".split())
self.assertEqual(result, 2)
def test_mutable_inits(self):
"""Tests that the same mutable init object isn't shared across invocations."""
result = transduce(transducer=mapping(lambda x: x), reducer=appending(), iterable=range(3))
self.assertListEqual(result, [0, 1, 2])
result = transduce(transducer=mapping(lambda x: x), reducer=appending(), iterable=range(3))
self.assertListEqual(result, [0, 1, 2])
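    def test_shared_mutable_default_sketch(self):
        """Sketch (plain Python, not the transducer API) of the shared
        mutable-default bug the test above guards against."""
        def collect(item, acc=[]):  # anti-pattern: one list shared across calls
            acc.append(item)
            return acc
        self.assertEqual(collect(1), [1])
        self.assertEqual(collect(2), [1, 2])  # state leaked from the first call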
def test_adding_reducer(self):
result = transduce(
transducer=mapping(lambda x: x * x),
reducer=adding(),
iterable=list(range(3)) * 2)
        # `adding` accumulates into a set, so compare order-insensitively.
        self.assertListEqual(sorted(result), [0, 1, 4])
class TestComposedTransducers(unittest.TestCase):
def test_chained_transducers(self):
result = transduce(transducer=compose(
mapping(lambda x: x*x),
filtering(lambda x: x % 5 != 0),
taking(6),
dropping_while(lambda x: x < 15),
distinct()),
reducer=appending(),
iterable=range(20))
self.assertSequenceEqual(result, [16, 36, 49])
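    def test_chained_transducers_builtin_equivalent(self):
        """Sketch: the same pipeline restated with builtins/itertools as a
        cross-check (an illustration, not the transducer API)."""
        from itertools import dropwhile, islice
        squares = (x * x for x in range(20))
        no_multiples_of_5 = (x for x in squares if x % 5 != 0)
        first_six = list(islice(no_multiples_of_5, 6))  # [1, 4, 9, 16, 36, 49]
        seen, result = set(), []
        for x in dropwhile(lambda x: x < 15, first_six):
            if x not in seen:
                seen.add(x)
                result.append(x)
        self.assertSequenceEqual(result, [16, 36, 49])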
if __name__ == '__main__':
unittest.main()