#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import unittest
import PRESUBMIT
class MockInputApi(object):
def __init__(self):
self.re = re
self.os_path = os.path
self.files = []
self.is_committing = False
def AffectedFiles(self):
return self.files
class MockOutputApi(object):
class PresubmitResult(object):
def __init__(self, message, items=None, long_text=''):
self.message = message
self.items = items
self.long_text = long_text
class PresubmitError(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'error'
class PresubmitPromptWarning(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'warning'
class PresubmitNotifyResult(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'notify'
class PresubmitPromptOrNotify(PresubmitResult):
def __init__(self, message, items, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'promptOrNotify'
class MockFile(object):
def __init__(self, local_path, new_contents):
self._local_path = local_path
self._new_contents = new_contents
self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)]
def ChangedContents(self):
return self._changed_contents
def NewContents(self):
return self._new_contents
def LocalPath(self):
return self._local_path
class MockChange(object):
def __init__(self, changed_files):
self._changed_files = changed_files
def LocalPaths(self):
return self._changed_files
class IncludeOrderTest(unittest.TestCase):
def testSystemHeaderOrder(self):
scope = [(1, '#include <csystem.h>'),
(2, '#include <cppsystem>'),
(3, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(0, len(warnings))
def testSystemHeaderOrderMismatch1(self):
scope = [(10, '#include <cppsystem>'),
(20, '#include <csystem.h>'),
(30, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(1, len(warnings))
self.assertTrue('20' in warnings[0])
def testSystemHeaderOrderMismatch2(self):
scope = [(10, '#include <cppsystem>'),
(20, '#include "acustom.h"'),
(30, '#include <csystem.h>')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(1, len(warnings))
self.assertTrue('30' in warnings[0])
def testSystemHeaderOrderMismatch3(self):
scope = [(10, '#include "acustom.h"'),
(20, '#include <csystem.h>'),
(30, '#include <cppsystem>')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(2, len(warnings))
self.assertTrue('20' in warnings[0])
self.assertTrue('30' in warnings[1])
def testAlphabeticalOrderMismatch(self):
scope = [(10, '#include <csystem.h>'),
(15, '#include <bsystem.h>'),
(20, '#include <cppsystem>'),
(25, '#include <bppsystem>'),
(30, '#include "bcustom.h"'),
(35, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(3, len(warnings))
self.assertTrue('15' in warnings[0])
self.assertTrue('25' in warnings[1])
self.assertTrue('35' in warnings[2])
def testSpecialFirstInclude1(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude2(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude3(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_platform.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude4(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/bar.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_platform.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(1, len(warnings))
self.assertTrue('2' in warnings[0])
def testSpecialFirstInclude5(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo-suffix.h', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testOrderAlreadyWrong(self):
scope = [(1, '#include "b.h"'),
(2, '#include "a.h"'),
(3, '#include "c.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [3])
self.assertEqual(0, len(warnings))
def testConflictAdded1(self):
scope = [(1, '#include "a.h"'),
(2, '#include "c.h"'),
(3, '#include "b.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [2])
self.assertEqual(1, len(warnings))
self.assertTrue('3' in warnings[0])
def testConflictAdded2(self):
scope = [(1, '#include "c.h"'),
(2, '#include "b.h"'),
(3, '#include "d.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [2])
self.assertEqual(1, len(warnings))
self.assertTrue('2' in warnings[0])
def testIfElifElseEndif(self):
mock_input_api = MockInputApi()
contents = ['#include "e.h"',
'#define foo',
'#include "f.h"',
'#undef foo',
'#include "e.h"',
'#if foo',
'#include "d.h"',
'#elif bar',
'#include "c.h"',
'#else',
'#include "b.h"',
'#endif',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSysIncludes(self):
# #include <sys/...>'s can appear in any order.
mock_input_api = MockInputApi()
contents = ['#include <sys/b.h>',
'#include <sys/a.h>']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testCheckOnlyCFiles(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
contents = ['#include <b.h>',
'#include <a.h>']
mock_file_cc = MockFile('something.cc', contents)
mock_file_h = MockFile('something.h', contents)
mock_file_other = MockFile('something.py', contents)
mock_input_api.files = [mock_file_cc, mock_file_h, mock_file_other]
warnings = PRESUBMIT._CheckIncludeOrder(mock_input_api, mock_output_api)
self.assertEqual(1, len(warnings))
self.assertEqual(2, len(warnings[0].items))
self.assertEqual('promptOrNotify', warnings[0].type)
def testUncheckableIncludes(self):
mock_input_api = MockInputApi()
contents = ['#include <windows.h>',
'#include "b.h"'
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "gpu/command_buffer/gles_autogen.h"',
'#include "b.h"'
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "gl_mock_autogen.h"',
'#include "b.h"'
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "ipc/some_macros.h"',
'#include "b.h"'
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
class VersionControlConflictsTest(unittest.TestCase):
def testTypicalConflict(self):
lines = ['<<<<<<< HEAD',
' base::ScopedTempDir temp_dir_;',
'=======',
' ScopedTempDir temp_dir_;',
'>>>>>>> master']
errors = PRESUBMIT._CheckForVersionControlConflictsInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(3, len(errors))
self.assertTrue('1' in errors[0])
self.assertTrue('3' in errors[1])
self.assertTrue('5' in errors[2])
class BadExtensionsTest(unittest.TestCase):
def testBadRejFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', ''),
MockFile('some/path/foo.cc.rej', ''),
MockFile('some/path2/bar.h.rej', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(2, len(results[0].items))
self.assertTrue('foo.cc.rej' in results[0].items[0])
self.assertTrue('bar.h.rej' in results[0].items[1])
def testBadOrigFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h.orig', ''),
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(1, len(results[0].items))
self.assertTrue('qux.h.orig' in results[0].items[0])
def testGoodFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
def testOnlyOwnersFiles(self):
mock_change = MockChange([
'some/path/OWNERS',
        'A\\Windows\\Path\\OWNERS',
])
results = PRESUBMIT.GetPreferredTrySlaves(None, mock_change)
self.assertEqual(0, len(results))
class InvalidOSMacroNamesTest(unittest.TestCase):
def testInvalidOSMacroNames(self):
lines = ['#if defined(OS_WINDOWS)',
' #elif defined(OS_WINDOW)',
' # if defined(OS_MACOSX) || defined(OS_CHROME)',
'# else // defined(OS_MAC)',
'#endif // defined(OS_MACOS)']
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(len(lines), len(errors))
self.assertTrue(':1 OS_WINDOWS' in errors[0])
self.assertTrue('(did you mean OS_WIN?)' in errors[0])
def testValidOSMacroNames(self):
lines = ['#if defined(%s)' % m for m in PRESUBMIT._VALID_OS_MACROS]
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(0, len(errors))
if __name__ == '__main__':
unittest.main()
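# A usage note (standard unittest behavior, not specific to this file): a
# single case can be run from the command line, e.g.
#   python PRESUBMIT_test.py IncludeOrderTest.testSysIncludes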
"""
Helper functions for connecting to the Quilt Registry.
"""
import json
import os
import platform
import stat
import subprocess
import sys
import time
import botocore.session
import pkg_resources
import requests
from botocore.credentials import (
CredentialProvider,
CredentialResolver,
RefreshableCredentials,
)
from .util import BASE_PATH, QuiltException, get_from_config
AUTH_PATH = BASE_PATH / 'auth.json'
CREDENTIALS_PATH = BASE_PATH / 'credentials.json'
VERSION = pkg_resources.require('quilt3')[0].version
def _load_auth():
if AUTH_PATH.exists():
with open(AUTH_PATH) as fd:
return json.load(fd)
return {}
def _save_auth(cfg):
BASE_PATH.mkdir(parents=True, exist_ok=True)
with open(AUTH_PATH, 'w') as fd:
AUTH_PATH.chmod(stat.S_IRUSR | stat.S_IWUSR)
json.dump(cfg, fd)
def _load_credentials():
if CREDENTIALS_PATH.exists():
with open(CREDENTIALS_PATH) as fd:
return json.load(fd)
return {}
def _save_credentials(creds):
BASE_PATH.mkdir(parents=True, exist_ok=True)
with open(CREDENTIALS_PATH, 'w') as fd:
CREDENTIALS_PATH.chmod(stat.S_IRUSR | stat.S_IWUSR)
json.dump(creds, fd)
def get_registry_url():
return get_from_config('registryUrl')
def _update_auth(refresh_token, timeout=None):
try:
response = requests.post(
"%s/api/token" % get_registry_url(),
timeout=timeout,
data=dict(
refresh_token=refresh_token,
)
)
except requests.exceptions.ConnectionError as ex:
raise QuiltException("Failed to connect: %s" % ex)
if response.status_code != requests.codes.ok:
raise QuiltException("Authentication error: %s" % response.status_code)
data = response.json()
error = data.get('error')
if error is not None:
raise QuiltException("Failed to log in: %s" % error)
return dict(
refresh_token=data['refresh_token'],
access_token=data['access_token'],
expires_at=data['expires_at']
)
def _handle_response(resp, **kwargs):
if resp.status_code == requests.codes.unauthorized:
raise QuiltException(
"Authentication failed. Run `quilt3 login` again."
)
elif not resp.ok:
try:
data = resp.json()
raise QuiltException(data['message'])
except ValueError:
raise QuiltException("Unexpected failure: error %s" % resp.status_code)
def _create_auth(timeout=None):
"""
Reads the credentials, updates the access token if necessary, and returns it.
"""
url = get_registry_url()
contents = _load_auth()
auth = contents.get(url)
if auth is not None:
# If the access token expires within a minute, update it.
if auth['expires_at'] < time.time() + 60:
try:
auth = _update_auth(auth['refresh_token'], timeout)
except QuiltException as ex:
raise QuiltException(
"Failed to update the access token (%s). Run `quilt login` again." % ex
)
contents[url] = auth
_save_auth(contents)
return auth
def _create_session(auth):
"""
Creates a session object to be used for `push`, `install`, etc.
"""
session = requests.Session()
session.hooks.update(
response=_handle_response
)
session.headers.update({
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "quilt-python/%s (%s %s) %s/%s" % (
VERSION, platform.system(), platform.release(),
platform.python_implementation(), platform.python_version()
)
})
if auth is not None:
session.headers["Authorization"] = "Bearer %s" % auth['access_token']
return session
_session = None
def get_session(timeout=None):
"""
Creates a session or returns an existing session.
"""
global _session
if _session is None:
auth = _create_auth(timeout)
_session = _create_session(auth)
assert _session is not None
return _session
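# A minimal usage sketch (an assumption, not part of this module's API): reuse
# the cached authenticated session for a registry call. The endpoint path
# below is hypothetical.
#
#   session = get_session()
#   resp = session.get(get_registry_url() + "/api/some_endpoint")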
def clear_session():
global _session
if _session is not None:
_session.close()
_session = None
def open_url(url):
try:
if sys.platform == 'win32':
os.startfile(url) # pylint:disable=E1101
elif sys.platform == 'darwin':
with open(os.devnull, 'r+') as null:
subprocess.check_call(['open', url], stdin=null, stdout=null, stderr=null)
else:
with open(os.devnull, 'r+') as null:
subprocess.check_call(['xdg-open', url], stdin=null, stdout=null, stderr=null)
except Exception as ex: # pylint:disable=W0703
print("Failed to launch the browser: %s" % ex)
def login():
"""
Authenticate to your Quilt stack and assume the role assigned to you by
your stack administrator. Not required if you have existing AWS credentials.
Launches a web browser and asks the user for a token.
"""
registry_url = get_registry_url()
if registry_url is None:
raise QuiltException(
"You attempted to authenticate to a Quilt catalog, but your home catalog is "
"currently set to None. Please first specify your home catalog by running "
"\"quilt3.config('$URL')\", replacing '$URL' with your catalog homepage."
)
login_url = "%s/login" % get_registry_url()
print("Launching a web browser...")
print("If that didn't work, please visit the following URL: %s" % login_url)
open_url(login_url)
print()
refresh_token = input("Enter the code from the webpage: ")
login_with_token(refresh_token)
def login_with_token(refresh_token):
"""
Authenticate using an existing token.
"""
# Get an access token and a new refresh token.
auth = _update_auth(refresh_token)
url = get_registry_url()
contents = _load_auth()
contents[url] = auth
_save_auth(contents)
clear_session()
# use registry-provided credentials
_refresh_credentials()
def logout():
"""
Do not use Quilt credentials. Useful if you have existing AWS credentials.
"""
# TODO revoke refresh token (without logging out of web sessions)
if _load_auth() or _load_credentials():
_save_auth({})
_save_credentials({})
else:
print("Already logged out.")
clear_session()
def _refresh_credentials():
session = get_session()
creds = session.get(
"{url}/api/auth/get_credentials".format(
url=get_registry_url()
)
).json()
result = {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': creds['Expiration']
}
_save_credentials(result)
return result
def logged_in():
"""
Return catalog URL if Quilt client is authenticated. Otherwise
return `None`.
"""
url = get_registry_url()
if url in _load_auth():
return get_from_config('navigator_url')
class QuiltProvider(CredentialProvider):
METHOD = 'quilt-registry'
CANONICAL_NAME = 'QuiltRegistry'
def __init__(self, credentials):
super().__init__()
self._credentials = credentials
def load(self):
creds = RefreshableCredentials.create_from_metadata(
metadata=self._credentials,
method=self.METHOD,
refresh_using=_refresh_credentials,
)
return creds
def create_botocore_session():
botocore_session = botocore.session.get_session()
# If we have saved credentials, use them. Otherwise, create a normal Boto session.
credentials = _load_credentials()
if credentials:
provider = QuiltProvider(credentials)
resolver = CredentialResolver([provider])
botocore_session.register_component('credential_provider', resolver)
return botocore_session
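# A minimal usage sketch (assumption): hand the botocore session to boto3 so
# S3 calls transparently use, and refresh, Quilt registry credentials.
#
#   import boto3
#   session = boto3.Session(botocore_session=create_botocore_session())
#   s3 = session.client('s3')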
from flask import Flask, render_template, request, abort, jsonify
from flask import Markup
from monsit import db
import datetime
import json
app = Flask(__name__)
class HostInfo(object):
def __init__(self, host_id, name, is_connected, last_update_time):
self.id = host_id
self.name = name
self.is_connected = is_connected
self.last_update_time = last_update_time
@app.route("/")
def index():
with db.DBConnection() as cnx:
host_infos = []
for host in cnx.get_all_hosts():
host_id = host[0]
host_name = host[1]
infos = cnx.get_host_infos(host_id, [1])
info_json = json.loads(infos[1])
host_infos.append(
HostInfo(host_id, host_name, info_json['connected'],
datetime.datetime.fromtimestamp(info_json['datetime'])))
return render_template('index.html', hosts=host_infos)
@app.route('/hostinfo', methods=['GET'])
def hostinfo():
try:
host_id = int(request.args['id'])
host_name = request.args['name']
except (KeyError, ValueError):
abort(404)
with db.DBConnection() as cnx:
display_setting = cnx.get_display_setting(host_id)
return render_template('hostinfo.html', host_id=host_id, host_name=host_name,
display_setting=Markup(display_setting))
_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
@app.route('/_get_host_stat', methods=['GET'])
def ajax_hoststat():
stat_ids = request.args.getlist('stat_ids[]', type=int)
host_id = request.args.get('host_id', 0, type=int)
with db.DBConnection() as cnx:
host_stats = cnx.get_host_stats(host_id, stat_ids)
return jsonify(return_code=0, stats=host_stats)
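# Example request shape for the endpoint above (a sketch; the IDs are made up):
#   GET /_get_host_stat?host_id=1&stat_ids[]=1&stat_ids[]=2
# which responds with JSON like {"return_code": 0, "stats": ...}.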
@app.route('/_get_latest_stat', methods=['GET'])
def ajax_latest_stat():
stat_ids = request.args.getlist('stat_ids[]', type=int)
host_id = request.args.get('id', 0, type=int)
last_times = request.args.getlist('latest_time[]')
#print 'stat_id', stat_id, 'host_id', host_id, 'last_time', last_time
with db.DBConnection() as cnx:
try:
latest_stats = cnx.get_updated_stats(host_id, stat_ids, last_times)
        except Exception:
            print('db error')
return jsonify(return_code=1)
if latest_stats is None:
return jsonify(return_code=1)
return jsonify(return_code=0, stats=latest_stats)
@app.route('/_get_host_info', methods=['GET'])
def ajax_host_info():
info_ids = request.args.getlist('info_ids[]', type=int)
host_id = request.args.get('id', 0, type=int)
    print('host_id', host_id)
with db.DBConnection() as cnx:
try:
host_infos = cnx.get_host_infos(host_id, info_ids)
            print(host_infos)
return jsonify(return_code=0, infos=host_infos)
        except Exception:
            print('db error')
import traceback
traceback.print_exc()
return jsonify(return_code=1)
@app.route("/add_stat.html")
def add_stat():
with db.DBConnection() as cnx:
host_infos = []
for host in cnx.get_all_hosts():
host_id = host[0]
host_name = host[1]
host_infos.append(HostInfo(host_id, host_name, True, None))
return render_template('add_stat.html', hosts=host_infos,
value_types=db.ValueType.value_type_str)
@app.route("/do_add_stat", methods=['POST'])
def do_add_stat():
host_id = int(request.form['host_id'])
stat_name = request.form['stat_name']
chart_name = request.form['chart_name']
y_value_type = int(request.form['value_type'])
y_unit = request.form['unit']
with db.DBConnection() as cnx:
stat_id = cnx.insert_new_stat(host_id, stat_name, chart_name,
y_value_type, y_unit)
cnx.commit()
return render_template('admin_msg.html',
msg=('New stat id is %d' % stat_id))
@app.route("/add_info.html")
def add_info():
with db.DBConnection() as cnx:
host_infos = []
for host in cnx.get_all_hosts():
host_id = host[0]
host_name = host[1]
host_infos.append(HostInfo(host_id, host_name, True, None))
return render_template('add_info.html', hosts=host_infos)
@app.route("/do_add_info", methods=['POST'])
def do_add_info():
host_id = int(request.form['host_id'])
info_name = request.form['info_name']
chart_name = request.form['chart_name']
with db.DBConnection() as cnx:
info_id = cnx.insert_new_info(host_id, info_name, chart_name)
cnx.commit()
return render_template('admin_msg.html',
msg=('New info id is %d' % info_id))
@app.route("/add_alarm.html")
def add_alarm():
with db.DBConnection() as cnx:
host_infos = []
for host in cnx.get_all_hosts():
host_id = host[0]
host_name = host[1]
host_infos.append(HostInfo(host_id, host_name, True, None))
return render_template('add_alarm.html', hosts=host_infos)
@app.route("/do_add_alarm", methods=['POST'])
def do_add_alarm():
host_id = int(request.form['host_id'])
alarm_name = request.form['alarm_name']
alarm_type = request.form['alarm_type']
stat_or_info_id = int(request.form['stat_info_id'])
threshold_type = request.form['threshold_type']
threshold = request.form['threshold']
if threshold_type == 'int':
threshold = int(threshold)
elif threshold_type == 'double':
threshold = float(threshold)
message = request.form['message']
emails = request.form['emails']
with db.DBConnection() as cnx:
cnx.insert_alarm_setting(host_id, alarm_name, alarm_type,
stat_or_info_id, threshold_type,
threshold, message, emails)
cnx.commit()
return render_template('admin_msg.html',
msg='Alarm has been added successfully')
@app.route("/set_display.html")
def set_display():
with db.DBConnection() as cnx:
host_infos = []
first_host_id = False
for host in cnx.get_all_hosts():
host_id = host[0]
host_name = host[1]
host_infos.append(HostInfo(host_id, host_name, True, None))
if not first_host_id:
first_host_id = host_id
display_setting = cnx.get_display_setting(first_host_id)
return render_template('set_display.html', hosts=host_infos,
display_setting=Markup(display_setting))
@app.route("/do_set_display", methods=['POST'])
def do_set_display():
host_id = int(request.form['host_id'])
display_setting = request.form['display_setting']
with db.DBConnection() as cnx:
cnx.update_display_setting(host_id, display_setting)
cnx.commit()
return render_template(
'admin_msg.html',
msg=('Display for host %d is set successfully' % host_id))
if __name__ == "__main__":
db.init()
app.run(host='0.0.0.0', debug=True)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality.
EXPERIMENTAL: APIs here are unstable and likely to change without notice.
@@toco_convert
@@toco_convert_protos
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import tempfile
from tensorflow.contrib.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.contrib.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.contrib.lite.toco import types_pb2 as _types_pb2
from tensorflow.contrib.lite.toco.python.tensorflow_wrap_toco import TocoConvert as _toco_convert_protos
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util.all_util import remove_undocumented
# Enum types from the protobuf promoted to the API
FLOAT = _types_pb2.FLOAT
INT32 = _types_pb2.INT32
INT64 = _types_pb2.INT64
STRING = _types_pb2.STRING
QUANTIZED_UINT8 = _types_pb2.QUANTIZED_UINT8
TENSORFLOW_GRAPHDEF = _toco_flags_pb2.TENSORFLOW_GRAPHDEF
TFLITE = _toco_flags_pb2.TFLITE
GRAPHVIZ_DOT = _toco_flags_pb2.GRAPHVIZ_DOT
# Shelling out to another python process protects against crashes, but it
# breaks some dependent targets because it forces a dependency on an external
# py_binary. The experimental mode below calls the TOCO API directly and does
# not have that drawback.
EXPERIMENTAL_USE_TOCO_API_DIRECTLY = True
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
"""Convert `input_data_str` according to model and toco parameters.
  Unless you know what you are doing, consider using
  the friendlier @{tf.contrib.lite.toco_convert}.
Args:
model_flags_str: Serialized proto describing model properties, see
`toco/model_flags.proto`.
toco_flags_str: Serialized proto describing conversion properties, see
`toco/toco_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common)
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
# TODO(aselle): When toco does not use fatal errors for failure, we can
# switch this on.
if not _toco_from_proto_bin:
return _toco_convert_protos(model_flags_str, toco_flags_str, input_data_str)
with tempfile.NamedTemporaryFile() as fp_toco, \
tempfile.NamedTemporaryFile() as fp_model, \
tempfile.NamedTemporaryFile() as fp_input, \
tempfile.NamedTemporaryFile() as fp_output:
fp_model.write(model_flags_str)
fp_toco.write(toco_flags_str)
fp_input.write(input_data_str)
fp_model.flush()
fp_toco.flush()
fp_input.flush()
cmd = [
_toco_from_proto_bin, fp_model.name, fp_toco.name, fp_input.name,
fp_output.name
]
cmdline = " ".join(cmd)
proc = subprocess.Popen(
cmdline,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
stdout, stderr = proc.communicate()
exitcode = proc.returncode
if exitcode == 0:
stuff = fp_output.read()
return stuff
else:
raise RuntimeError("TOCO failed see console for info.\n%s\n%s\n" %
(stdout, stderr))
def _tensor_name(x):
return x.name.split(":")[0]
def toco_convert(input_data,
input_tensors,
output_tensors,
inference_type=FLOAT,
input_format=TENSORFLOW_GRAPHDEF,
output_format=TFLITE,
quantized_input_stats=None,
drop_control_dependency=True):
"""Convert a model using TOCO from `input_format` to `output_format`.
Typically this is to convert from TensorFlow GraphDef to TFLite, in which
case the default `input_format` and `output_format` are sufficient.
Args:
input_data: Input data (i.e. often `sess.graph_def`).
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
inference_type: Currently must be `{FLOAT, QUANTIZED_UINT8}`.
input_format: Type of data to read (currently must be TENSORFLOW_GRAPHDEF).
output_format: Type of data to write (currently must be TFLITE or
GRAPHVIZ_DOT)
quantized_input_stats: For each member of input_tensors the mean and
std deviation of training data. Only needed if `inference_type` is
`QUANTIZED_UINT8`.
drop_control_dependency: Drops control dependencies silently. This is due
to tf lite not supporting control dependencies.
Returns:
The converted data. For example if tflite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
ValueError: If the input tensor type is unknown
RuntimeError: If TOCO fails to convert (in which case the runtime error's
error text will contain the TOCO error log)
"""
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
model = _model_flags_pb2.ModelFlags()
model.drop_control_dependency = drop_control_dependency
toco.inference_type = inference_type
for idx, input_tensor in enumerate(input_tensors):
if input_tensor.dtype == _dtypes.float32:
tflite_input_type = FLOAT
elif input_tensor.dtype == _dtypes.int32:
tflite_input_type = INT32
elif input_tensor.dtype == _dtypes.int64:
tflite_input_type = INT64
# TODO(aselle): Insert strings when they are available
else:
raise ValueError("Tensors %s not known type %r" % (input_tensor.name,
input_tensor.dtype))
input_array = model.input_arrays.add()
if inference_type == QUANTIZED_UINT8:
if tflite_input_type == FLOAT:
tflite_input_type = QUANTIZED_UINT8
input_array.mean, input_array.std = quantized_input_stats[idx]
input_array.name = _tensor_name(input_tensor)
input_array.shape.extend(map(int, input_tensor.get_shape()))
toco.input_types.append(tflite_input_type)
for output_tensor in output_tensors:
model.output_arrays.append(_tensor_name(output_tensor))
data = toco_convert_protos(model.SerializeToString(),
toco.SerializeToString(),
input_data.SerializeToString())
return data
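# A minimal usage sketch (an assumption, mirroring the docstring above): build
# a trivial float graph and convert it to a TFLite flatbuffer.
#
#   import tensorflow as tf
#   img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
#   out = tf.identity(img, name="out")
#   with tf.Session() as sess:
#     tflite_model = toco_convert(sess.graph_def, [img], [out])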
_allowed_symbols = [
"FLOAT",
"INT32",
"INT64",
"STRING",
"QUANTIZED_UINT8",
"TENSORFLOW_GRAPHDEF",
"TFLITE",
"GRAPHVIZ_DOT",
"EXPERIMENTAL_USE_TOCO_API_DIRECTLY",
]
remove_undocumented(__name__, _allowed_symbols)
# -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This is a web-server which integrates with the twisted.internet
infrastructure.
"""
from __future__ import division, absolute_import
import copy
import os
try:
from urllib import quote
except ImportError:
from urllib.parse import quote as _quote
def quote(string, *args, **kwargs):
return _quote(string.decode('charmap'), *args, **kwargs).encode('charmap')
import zlib
from zope.interface import implementer
from twisted.python.compat import _PY3, networkString, nativeString, intToBytes
if _PY3:
class Copyable:
"""
Fake mixin, until twisted.spread is ported.
"""
else:
from twisted.spread.pb import Copyable, ViewPoint
from twisted.internet import address
from twisted.web import iweb, http, html
from twisted.web.http import unquote
from twisted.python import log, reflect, failure, components
from twisted import copyright
# Re-enable as part of #6178 when twisted.web.util is ported to Python 3:
if not _PY3:
from twisted.web import util as webutil
from twisted.web import resource
from twisted.web.error import UnsupportedMethod
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
if _PY3:
# cgi.escape is deprecated in Python 3.
from html import escape
else:
from cgi import escape
NOT_DONE_YET = 1
__all__ = [
'supportedMethods',
'Request',
'Session',
'Site',
'version',
'NOT_DONE_YET',
'GzipEncoderFactory'
]
# backwards compatibility
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.datetimeToString instead",
"twisted.web.server",
"date_time_string")
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.stringToDatetime instead",
"twisted.web.server",
"string_date_time")
date_time_string = http.datetimeToString
string_date_time = http.stringToDatetime
# Support for other methods may be implemented on a per-resource basis.
supportedMethods = ('GET', 'HEAD', 'POST')
def _addressToTuple(addr):
if isinstance(addr, address.IPv4Address):
return ('INET', addr.host, addr.port)
elif isinstance(addr, address.UNIXAddress):
return ('UNIX', addr.name)
else:
return tuple(addr)
@implementer(iweb.IRequest)
class Request(Copyable, http.Request, components.Componentized):
"""
An HTTP request.
@ivar defaultContentType: A C{bytes} giving the default I{Content-Type}
value to send in responses if no other value is set. C{None} disables
the default.
"""
defaultContentType = b"text/html"
site = None
appRootURL = None
__pychecker__ = 'unusednames=issuer'
_inFakeHead = False
_encoder = None
def __init__(self, *args, **kw):
http.Request.__init__(self, *args, **kw)
components.Componentized.__init__(self)
def getStateToCopyFor(self, issuer):
x = self.__dict__.copy()
del x['transport']
# XXX refactor this attribute out; it's from protocol
# del x['server']
del x['channel']
del x['content']
del x['site']
self.content.seek(0, 0)
x['content_data'] = self.content.read()
x['remote'] = ViewPoint(issuer, self)
# Address objects aren't jellyable
x['host'] = _addressToTuple(x['host'])
x['client'] = _addressToTuple(x['client'])
# Header objects also aren't jellyable.
x['requestHeaders'] = list(x['requestHeaders'].getAllRawHeaders())
return x
# HTML generation helpers
def sibLink(self, name):
"""
Return the text that links to a sibling of the requested resource.
"""
if self.postpath:
return (len(self.postpath)*b"../") + name
else:
return name
def childLink(self, name):
"""
Return the text that links to a child of the requested resource.
"""
lpp = len(self.postpath)
if lpp > 1:
return ((lpp-1)*b"../") + name
elif lpp == 1:
return name
else: # lpp == 0
if len(self.prepath) and self.prepath[-1]:
return self.prepath[-1] + b'/' + name
else:
return name
def process(self):
"""
Process a request.
"""
# get site from channel
self.site = self.channel.site
# set various default headers
self.setHeader(b'server', version)
self.setHeader(b'date', http.datetimeToString())
# Resource Identification
self.prepath = []
self.postpath = list(map(unquote, self.path[1:].split(b'/')))
try:
resrc = self.site.getResourceFor(self)
if resource._IEncodingResource.providedBy(resrc):
encoder = resrc.getEncoder(self)
if encoder is not None:
self._encoder = encoder
self.render(resrc)
except:
self.processingFailed(failure.Failure())
def write(self, data):
"""
Write data to the transport (if not responding to a HEAD request).
@param data: A string to write to the response.
"""
if not self.startedWriting:
# Before doing the first write, check to see if a default
# Content-Type header should be supplied.
modified = self.code != http.NOT_MODIFIED
contentType = self.responseHeaders.getRawHeaders(b'content-type')
if modified and contentType is None and self.defaultContentType is not None:
self.responseHeaders.setRawHeaders(
b'content-type', [self.defaultContentType])
# Only let the write happen if we're not generating a HEAD response by
# faking out the request method. Note, if we are doing that,
# startedWriting will never be true, and the above logic may run
# multiple times. It will only actually change the responseHeaders once
# though, so it's still okay.
if not self._inFakeHead:
if self._encoder:
data = self._encoder.encode(data)
http.Request.write(self, data)
def finish(self):
"""
Override C{http.Request.finish} for possible encoding.
"""
if self._encoder:
data = self._encoder.finish()
if data:
http.Request.write(self, data)
return http.Request.finish(self)
def render(self, resrc):
"""
Ask a resource to render itself.
@param resrc: a L{twisted.web.resource.IResource}.
"""
try:
body = resrc.render(self)
except UnsupportedMethod as e:
allowedMethods = e.allowedMethods
if (self.method == b"HEAD") and (b"GET" in allowedMethods):
# We must support HEAD (RFC 2616, 5.1.1). If the
# resource doesn't, fake it by giving the resource
# a 'GET' request and then return only the headers,
# not the body.
log.msg("Using GET to fake a HEAD request for %s" %
(resrc,))
self.method = b"GET"
self._inFakeHead = True
body = resrc.render(self)
if body is NOT_DONE_YET:
log.msg("Tried to fake a HEAD request for %s, but "
"it got away from me." % resrc)
# Oh well, I guess we won't include the content length.
else:
self.setHeader(b'content-length', intToBytes(len(body)))
self._inFakeHead = False
self.method = b"HEAD"
self.write(b'')
self.finish()
return
            if self.method in supportedMethods:
# We MUST include an Allow header
# (RFC 2616, 10.4.6 and 14.7)
self.setHeader('Allow', ', '.join(allowedMethods))
s = ('''Your browser approached me (at %(URI)s) with'''
''' the method "%(method)s". I only allow'''
''' the method%(plural)s %(allowed)s here.''' % {
'URI': escape(self.uri),
'method': self.method,
'plural': ((len(allowedMethods) > 1) and 's') or '',
'allowed': ', '.join(allowedMethods)
})
epage = resource.ErrorPage(http.NOT_ALLOWED,
"Method Not Allowed", s)
body = epage.render(self)
else:
epage = resource.ErrorPage(
http.NOT_IMPLEMENTED, "Huh?",
"I don't know how to treat a %s request." %
(escape(self.method.decode("charmap")),))
body = epage.render(self)
# end except UnsupportedMethod
if body == NOT_DONE_YET:
return
if not isinstance(body, bytes):
body = resource.ErrorPage(
http.INTERNAL_SERVER_ERROR,
"Request did not return bytes",
"Request: " + html.PRE(reflect.safe_repr(self)) + "<br />" +
"Resource: " + html.PRE(reflect.safe_repr(resrc)) + "<br />" +
"Value: " + html.PRE(reflect.safe_repr(body))).render(self)
if self.method == b"HEAD":
if len(body) > 0:
# This is a Bad Thing (RFC 2616, 9.4)
log.msg("Warning: HEAD request %s for resource %s is"
" returning a message body."
" I think I'll eat it."
% (self, resrc))
self.setHeader(b'content-length',
intToBytes(len(body)))
self.write(b'')
else:
self.setHeader(b'content-length',
intToBytes(len(body)))
self.write(body)
self.finish()
def processingFailed(self, reason):
log.err(reason)
# Re-enable on Python 3 as part of #6178:
if not _PY3 and self.site.displayTracebacks:
body = ("<html><head><title>web.Server Traceback (most recent call last)</title></head>"
"<body><b>web.Server Traceback (most recent call last):</b>\n\n"
"%s\n\n</body></html>\n"
% webutil.formatFailure(reason))
else:
body = (b"<html><head><title>Processing Failed</title></head><body>"
b"<b>Processing Failed</b></body></html>")
self.setResponseCode(http.INTERNAL_SERVER_ERROR)
self.setHeader(b'content-type', b"text/html")
self.setHeader(b'content-length', intToBytes(len(body)))
self.write(body)
self.finish()
return reason
def view_write(self, issuer, data):
"""Remote version of write; same interface.
"""
self.write(data)
def view_finish(self, issuer):
"""Remote version of finish; same interface.
"""
self.finish()
def view_addCookie(self, issuer, k, v, **kwargs):
"""Remote version of addCookie; same interface.
"""
self.addCookie(k, v, **kwargs)
def view_setHeader(self, issuer, k, v):
"""Remote version of setHeader; same interface.
"""
self.setHeader(k, v)
def view_setLastModified(self, issuer, when):
"""Remote version of setLastModified; same interface.
"""
self.setLastModified(when)
def view_setETag(self, issuer, tag):
"""Remote version of setETag; same interface.
"""
self.setETag(tag)
def view_setResponseCode(self, issuer, code, message=None):
"""
Remote version of setResponseCode; same interface.
"""
self.setResponseCode(code, message)
def view_registerProducer(self, issuer, producer, streaming):
"""Remote version of registerProducer; same interface.
(requires a remote producer.)
"""
self.registerProducer(_RemoteProducerWrapper(producer), streaming)
def view_unregisterProducer(self, issuer):
self.unregisterProducer()
### these calls remain local
session = None
def getSession(self, sessionInterface = None):
# Session management
if not self.session:
cookiename = b"_".join([b'TWISTED_SESSION'] + self.sitepath)
sessionCookie = self.getCookie(cookiename)
if sessionCookie:
try:
self.session = self.site.getSession(sessionCookie)
except KeyError:
pass
# if it still hasn't been set, fix it up.
if not self.session:
self.session = self.site.makeSession()
self.addCookie(cookiename, self.session.uid, path=b'/')
self.session.touch()
if sessionInterface:
return self.session.getComponent(sessionInterface)
return self.session
def _prePathURL(self, prepath):
port = self.getHost().port
if self.isSecure():
default = 443
else:
default = 80
if port == default:
hostport = ''
else:
hostport = ':%d' % port
prefix = networkString('http%s://%s%s/' % (
self.isSecure() and 's' or '',
nativeString(self.getRequestHostname()),
hostport))
path = b'/'.join([quote(segment, safe=b'') for segment in prepath])
return prefix + path
def prePathURL(self):
return self._prePathURL(self.prepath)
def URLPath(self):
from twisted.python import urlpath
return urlpath.URLPath.fromRequest(self)
def rememberRootURL(self):
"""
Remember the currently-processed part of the URL for later
recalling.
"""
url = self._prePathURL(self.prepath[:-1])
self.appRootURL = url
def getRootURL(self):
"""
Get a previously-remembered URL.
"""
return self.appRootURL
@implementer(iweb._IRequestEncoderFactory)
class GzipEncoderFactory(object):
"""
@cvar compressLevel: The compression level used by the compressor, default
to 9 (highest).
@since: 12.3
"""
compressLevel = 9
def encoderForRequest(self, request):
"""
Check the headers if the client accepts gzip encoding, and encodes the
request if so.
"""
acceptHeaders = request.requestHeaders.getRawHeaders(
'accept-encoding', [])
supported = ','.join(acceptHeaders).split(',')
if 'gzip' in supported:
encoding = request.responseHeaders.getRawHeaders(
'content-encoding')
if encoding:
encoding = '%s,gzip' % ','.join(encoding)
else:
encoding = 'gzip'
request.responseHeaders.setRawHeaders('content-encoding',
[encoding])
return _GzipEncoder(self.compressLevel, request)
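# A minimal usage sketch (assumption): twisted.web.resource provides an
# EncodingResourceWrapper that pairs a resource with encoder factories, e.g.:
#
#   from twisted.web.resource import EncodingResourceWrapper
#   wrapped = EncodingResourceWrapper(someResource, [GzipEncoderFactory()])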
@implementer(iweb._IRequestEncoder)
class _GzipEncoder(object):
"""
An encoder which supports gzip.
@ivar _zlibCompressor: The zlib compressor instance used to compress the
stream.
@ivar _request: A reference to the originating request.
@since: 12.3
"""
_zlibCompressor = None
def __init__(self, compressLevel, request):
self._zlibCompressor = zlib.compressobj(
compressLevel, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
self._request = request
def encode(self, data):
"""
Write to the request, automatically compressing data on the fly.
"""
if not self._request.startedWriting:
# Remove the content-length header, we can't honor it
# because we compress on the fly.
self._request.responseHeaders.removeHeader(b'content-length')
return self._zlibCompressor.compress(data)
def finish(self):
"""
        Finish handling the request, flushing any data from the zlib
        buffer.
"""
remain = self._zlibCompressor.flush()
self._zlibCompressor = None
return remain
class _RemoteProducerWrapper:
def __init__(self, remote):
self.resumeProducing = remote.remoteMethod("resumeProducing")
self.pauseProducing = remote.remoteMethod("pauseProducing")
self.stopProducing = remote.remoteMethod("stopProducing")
class Session(components.Componentized):
"""
A user's session with a system.
This utility class contains no functionality, but is used to
represent a session.
@ivar uid: A unique identifier for the session, C{bytes}.
@ivar _reactor: An object providing L{IReactorTime} to use for scheduling
expiration.
@ivar sessionTimeout: timeout of a session, in seconds.
"""
sessionTimeout = 900
_expireCall = None
def __init__(self, site, uid, reactor=None):
"""
Initialize a session with a unique ID for that session.
"""
components.Componentized.__init__(self)
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
self.site = site
self.uid = uid
self.expireCallbacks = []
self.touch()
self.sessionNamespaces = {}
def startCheckingExpiration(self):
"""
Start expiration tracking.
@return: C{None}
"""
self._expireCall = self._reactor.callLater(
self.sessionTimeout, self.expire)
def notifyOnExpire(self, callback):
"""
Call this callback when the session expires or logs out.
"""
self.expireCallbacks.append(callback)
def expire(self):
"""
Expire/logout of the session.
"""
del self.site.sessions[self.uid]
for c in self.expireCallbacks:
c()
self.expireCallbacks = []
if self._expireCall and self._expireCall.active():
self._expireCall.cancel()
# Break reference cycle.
self._expireCall = None
def touch(self):
"""
Notify session modification.
"""
self.lastModified = self._reactor.seconds()
if self._expireCall is not None:
self._expireCall.reset(self.sessionTimeout)
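# A minimal usage sketch (assumption): a resource can hook session expiry via
# the notifyOnExpire API defined above.
#
#   session = request.getSession()
#   session.notifyOnExpire(
#       lambda: log.msg("session expired: %r" % (session.uid,)))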
version = networkString("TwistedWeb/%s" % (copyright.version,))
class Site(http.HTTPFactory):
"""
A web site: manage log, sessions, and resources.
@ivar counter: increment value used for generating unique sessions ID.
@ivar requestFactory: factory creating requests objects. Default to
L{Request}.
@ivar displayTracebacks: if set, Twisted internal errors are displayed on
rendered pages. Default to C{True}.
@ivar sessionFactory: factory for sessions objects. Default to L{Session}.
@ivar sessionCheckTime: Deprecated. See L{Session.sessionTimeout} instead.
"""
counter = 0
requestFactory = Request
displayTracebacks = True
sessionFactory = Session
sessionCheckTime = 1800
def __init__(self, resource, *args, **kwargs):
"""
@param resource: The root of the resource hierarchy. All request
traversal for requests received by this factory will begin at this
resource.
@type resource: L{IResource} provider
@see: L{twisted.web.http.HTTPFactory.__init__}
"""
http.HTTPFactory.__init__(self, *args, **kwargs)
self.sessions = {}
self.resource = resource
def _openLogFile(self, path):
from twisted.python import logfile
return logfile.LogFile(os.path.basename(path), os.path.dirname(path))
def __getstate__(self):
d = self.__dict__.copy()
d['sessions'] = {}
return d
def _mkuid(self):
"""
(internal) Generate an opaque, unique ID for a user's session.
"""
from hashlib import md5
import random
self.counter = self.counter + 1
return md5(networkString(
"%s_%s" % (str(random.random()) , str(self.counter)))
).hexdigest()
def makeSession(self):
"""
Generate a new Session instance, and store it for future reference.
"""
uid = self._mkuid()
session = self.sessions[uid] = self.sessionFactory(self, uid)
session.startCheckingExpiration()
return session
def getSession(self, uid):
"""
Get a previously generated session, by its unique ID.
This raises a KeyError if the session is not found.
"""
return self.sessions[uid]
def buildProtocol(self, addr):
"""
Generate a channel attached to this site.
"""
channel = http.HTTPFactory.buildProtocol(self, addr)
channel.requestFactory = self.requestFactory
channel.site = self
return channel
isLeaf = 0
def render(self, request):
"""
Redirect because a Site is always a directory.
"""
request.redirect(request.prePathURL() + b'/')
request.finish()
def getChildWithDefault(self, pathEl, request):
"""
Emulate a resource's getChild method.
"""
request.site = self
return self.resource.getChildWithDefault(pathEl, request)
def getResourceFor(self, request):
"""
Get a resource for a request.
        This iterates through the resource hierarchy, calling
getChildWithDefault on each resource it finds for a path element,
stopping when it hits an element where isLeaf is true.
"""
request.site = self
# Sitepath is used to determine cookie names between distributed
# servers and disconnected sites.
request.sitepath = copy.copy(request.prepath)
return resource.getChildForRequest(self.resource, request)
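# A minimal usage sketch (assumption): serve a static resource tree with this
# Site on port 8080.
#
#   from twisted.internet import reactor
#   from twisted.web.static import File
#   reactor.listenTCP(8080, Site(File('/tmp')))
#   reactor.run()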
#This file is only to be run by celery
from __future__ import absolute_import
import datetime
import json
from lib.celery import app as celery
from pymongo import MongoClient
#Access the database for the files
client = MongoClient()
db = client["files"]
entries = db["entries"]
SUPPORTED_FORMATS = ".txt or .json"
#Tasks
#_convert_txt and _convert_json are launched by index to parse the file and optimize it for searchability.
@celery.task
def _convert_txt (ID):
doc = entries.find_one({"_id": ID})
#In case the search fails
if not doc:
print("Problem opening document with id {0}, exiting...".format(ID))
return
try:
#Automatically opens in read only mode
file = open(doc["path"])
except FileNotFoundError as error:
print("File {0} was not found.\nError:\n{1}\nEnding conversion task.".format(doc["path"], error))
except:
print("Unexpected error in opening file {0}.".format(doc["path"]))
raise
else:
'''Retrieving every line from the file and processing it to be search-friendly
Replaced with spaces to avoid words being conjoined'''
#Couldn't replace "\x", getting unicode error
#Including the line above in the multi-line comment was causing an error
        lines = [line.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace("\v", " ").replace("\b", "").replace("\a", "").replace("\f", " ").strip() for line in file if not line == "\n"]
#Conjoining the lines into one searchable lump of text
text = " ".join(lines)
#Removing consecutive spaces from the text
while not text.find(" ") == -1:
#Couldn't use str.replace(" ", " "), would result in never exiting loop.
split = text.partition(" ")
text = " ".join([split[0], split[2]])
doc["text"] = text
doc["complete"] = True
print(doc)
entries.save(doc)
file.close()
print("Completed indexing for entry given with path {0}".format(doc["path"]))
@celery.task
def _convert_json (entry, path, fileName):
doc = {
"title": entry["title"],
#Simply the name of the file
"name": fileName,
"text": entry["text"],
"tags": entry["tags"],
#Path the file relative to the directory of indexsearch.py or absolute
"path": path,
#Time of indexing
"date": datetime.datetime.utcnow(),
#Used to prevent this document from being searched before _convert_txt finishes this entry, not necessary here
"complete": True
}
entries.insert(doc)
#Task called by command line interface to index files for searching.
@celery.task
def index (path, meta):
#Forward slashes will always work with open()
path.replace("\\", "/")
#Ensuring that important fields are text searchable by Mongo and weighting their importance
entries.ensure_index([("title", "text"), ("name", "text"), ("text", "text"), ("tags", "text")], weights={"title": 10, "name": 10, "text": 1, "tags": 5})
#Retrieving the name of the file and its extension
fileName = path[path.rfind("/") + 1:]
#Creating a string out of the tags given to the file
tags = " ".join(meta["tags"])
#Currently just setting it up to handle .txt and .json formats. Easy to expand to include other formats but want to get this submitted ASAP
#Handle .txt files
if path[-4:] == ".txt":
document = {
#Can be "", optional
"title": meta["title"],
#Simply the name of the file
"name": fileName,
#Text is parsed then added by _convert_txt task
"text": "",
#If tags are given then they end up in a string with a space delimeter here
"tags": tags,
#Path the file relative to the directory of indexsearch.py or absolute
"path": path,
#Time of indexing
"date": datetime.datetime.utcnow(),
#Used to prevent this document from being searched before _convert_txt finishes this entry
"complete": False
}
        #Inserts the document and sends the _id to _convert_txt
        _convert_txt.delay(entries.insert(document))
#Handle .json files
elif path[-5:] == ".json":
try:
#Automatically opens in read only mode
file = open(path)
except FileNotFoundError as error:
print("File {0} was not found.\nError:\n{1}\nEnding conversion task.".format(path, error))
except:
print("Unexpected error in opening file {0}.".format(path))
raise
else:
#Using python's json module
json_file = json.load(file)
if "entries" in json_file.keys():
if type(json_file["entries"]) is list:
for entry in json_file["entries"]:
if "title" in entry.keys() and "text" in entry.keys() and "tags" in entry.keys():
                            _convert_json.delay(entry, path, fileName)
else:
print("Entry {0} in file at path {1} did not have all required fields, out of title, text and tags".format(entry, path))
else:
print("File at path {0} did not follow the template for json files, value for key 'entries' was not a list/array".format(path))
else:
print(".json file at {0} did not have required keys 'title', 'text', and 'tags'.".format(doc["path"]))
file.close()
else:
print("Format of document at {0} is not currently supported. Please use ".format(doc["path"]) + SUPPORTED_FORMATS)
#Task used to search all file entries and return results
@celery.task
def search (phrase, sort="relevance", resultFilter=None, filterData=None):
#Turns the Mongo query results into readable results to send back to the command line interface.
def results (matches, sort, resultFilter, filterData):
#If the search turns up any results...
if len(matches) > 0:
#Stores all the strings sent back to command line
results = []
if not sort == "relevance":
if sort == "date-new":
if len(matches) > 1:
#Create a list of the dates of each entry to sort with...
dates = []
for match in matches:
dates.append(match["obj"]["date"])
#Create a zip object of two-tuples, t[0] = date, t[1] = match
matches = zip(dates, matches)
#Extracting the two-tuples from the zip object into a list
matches = [pair for pair in matches]
#Sorts based on the first element of the two-tuple, the date. Smallest to largest (Oldest to newest)
matches.sort()
#Same as date-old but everything gets reversed
matches.reverse()
#Taking just the match object of the two-tuple with the date
matches = [match[1] for match in matches]
elif sort == "date-old":
if len(matches) > 1:
#Create a list of the dates of each entry to sort with...
dates = []
for match in matches:
dates.append(match["obj"]["date"])
#Create a zip object of two-tuples, t[0] = date, t[1] = match
matches = zip(dates, matches)
#Extracting the two-tuples from the zip object into a list
matches = [pair for pair in matches]
#Sorts based on the first element of the two-tuple, the date. Smallest to largest (Oldest to newest)
matches.sort()
#Taking just the match object of the two-tuple with the date
matches = [match[1] for match in matches]
else:
raise NotImplementedError("Sort method {0} has not been implemented.".format(sort))
if resultFilter == "type":
#Keeps indexes of matches that are scrubbed out by the filter
filtered = []
for index in range(len(matches)):
match = matches[index]
if not match["obj"]["name"].find(".") == -1:
#Matching the file extension
for extensionFilter in filterData:
if not match["obj"]["name"][match["obj"]["name"].rfind("."):] == extensionFilter:
filtered.append(index)
#Highest indexes first
filtered.reverse()
#Delete filtered results
for index in filtered:
del matches[index]
#Create a report for each result
for index in range(len(matches)):
match = matches[index]
if sort == "relevance":
#If the entry was given a title, use it otherwise only the file path.
results.append("{0} - Match Coefficient: {1}\n".format(match["obj"]["title"] + " (" + match["obj"]["path"] + ")" if not match["obj"]["title"] == "" else match["obj"]["path"], match["score"]))
elif sort == "date-new" or sort == "date-old":
#If the entry was given a title, use it otherwise only the file path.
results.append("{0} - Date indexed: {1}\n".format(match["obj"]["title"] + " (" + match["obj"]["path"] + ")" if not match["obj"]["title"] == "" else match["obj"]["path"], match["obj"]["date"]))
#If all results got removed by the filter...
if len(results) < 1:
results = "No matches found for search of {0}\n".format(phrase)
return results
#If there were no results in the first place...
else:
return "No matches found for search of {0}\n".format(phrase)
#Search task returns results from call of results function which was given the Mongo search, to the command line interface
    return results(db.command("text", "entries", search=phrase, filter={"complete": True}, projection={"complete": False})["results"], sort, resultFilter, filterData)
if __name__ == "__main__":
print("This file only meant for celery tasks. Do not attempt to run it.")
|
|
# Copyright 2011-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
# import logging ; logger = logging.getLogger(__name__)
import inspect
import copy
from django.conf import settings
from django.db.models.signals import class_prepared
from django.core.exceptions import FieldDoesNotExist
# from django.db.models.fields import FieldDoesNotExist
from django.db import models
from django.dispatch import receiver
from lino.core import fields
from lino.core.signals import pre_analyze
from .utils import resolve_model
from django.apps import apps
get_models = apps.get_models
PENDING_INJECTS = dict()
PREPARED_MODELS = dict()
def fix_field_cache(model):
"""
Remove duplicate entries in the field cache of the specified model
in order to fix Django issue #10808
"""
new_cache = []
used_fields = {}
for parent in model._meta.get_parent_list():
for f in parent._meta.local_fields:
used_fields[f.name] = f
used_fields[f.attname] = f
# def msg(name):
# parent = used_fields.get(name).model
# print("20200622 Not adding {} to {} because inherited from {}".format(
# name, model, parent))
for f in model._meta.local_fields:
if used_fields.get(f.name):
pass # msg(f.name)
elif used_fields.get(f.attname):
pass # msg(f.attname)
else:
new_cache.append(f)
#~ raise Exception("20131110 %r" % (model._meta._field_cache,))
model._meta.local_fields = new_cache
# model._meta._expire_cache()
# print(model._meta.fields)
# from django.apps import apps
# for m in apps.get_models():
# if issubclass(m, model):
# m._meta._expire_cache()
# django.core.exceptions.AppRegistryNotReady: Models aren't loaded yet.
@receiver(class_prepared)
def on_class_prepared(sender, **kw):
"""This is Lino's general `class_prepared` handler.
It does two things:
- Run pending calls to :func:`inject_field` and :func:`update_field`.
- Apply a workaround for Django's ticket 10808. In a diamond
inheritance pattern, `_meta._field_cache` contains certain
fields twice. So we remove these duplicate fields from
      `_meta._field_cache`. (A better solution would of course be to
      not collect them.)
"""
model = sender
# collect_virtual_fields() first time because virtual fields might
# get updated
# collect_virtual_fields(model)
k = model._meta.app_label + '.' + model.__name__
PREPARED_MODELS[k] = model
#~ logger.info("20120627 on_class_prepared %r = %r",k,model)
todos = PENDING_INJECTS.pop(k, None)
if todos is not None:
for func, caller in todos:
func(model)
#~ for k,v in injects.items():
#~ model.add_to_class(k,v)
fix_field_cache(model)
# # collect_virtual_fields() second time because new virtual fields
# # might have been injected
# collect_virtual_fields(model)
def fmt(func_caller):
f, caller = func_caller
#~ ln = inspect.getsourcelines(f)[1]
#~ return "%s in %s:%d" % (f.__name__,inspect.getsourcefile(f),ln)
#~ return "%s in %s:%d" % (f.__name__,caller.filename,caller.line_no)
#~ return "%s in %s" % (f.__name__,caller)
return "called from %s" % caller
@receiver(pre_analyze)
def check_pending_injects(sender, models_list=None, **kw):
# raise Exception(20150304)
# called from kernel.analyze_models()
#~ logger.info("20130212 check_pending_injects()...")
if PENDING_INJECTS:
msg = ''
for spec, funcs in list(PENDING_INJECTS.items()):
msg += spec + ': '
msg += ', '.join([fmt(f) for f in funcs])
#~ msg += '\n'.join([str(dir(func)) for func in funcs])
#~ msg += '\n'.join([str(func.func_code.co_consts) for func in funcs])
#~ msg += str(funcs)
raise Exception("Oops, there are pending injects: %s" % msg)
#~ logger.warning("pending injects: %s", msg)
#~ logger.info("20131110 no pending injects")
"""
20130106
now we loop a last time over each model and fill it's _meta._field_cache
otherwise if some application module used inject_field() on a model which
has subclasses, then the new field would not be seen by subclasses
"""
for model in models_list:
model._meta._expire_cache()
fix_field_cache(model)
# collect_virtual_fields(model)
def do_when_prepared(todo, *model_specs):
"""
Execute the specified function `todo` on all specified models
as soon as they are prepared.
If a specified model hasn't yet been prepared,
add the call to a queue and execute it later.
If a model_spec is not a string, the function `todo` is called immediately.
"""
#~ caller = inspect.stack()[2]
caller = inspect.getouterframes(inspect.currentframe())[2]
#~ print 20131111, caller
caller = "%s:%d" % (caller[1], caller[2])
#~ caller = inspect.getframeinfo(caller)
#~ caller = inspect.getframeinfo(inspect.currentframe().f_back)[2]
#~ caller = inspect.getframeinfo(caller.f_back)[2]
# logger.info("20200622 test %s for %s", caller, model_specs)
for model_spec in model_specs:
if model_spec is None:
# logger.info("20200622 Ignore %s for %s", caller, model_spec)
# e.g. inject_field during autodoc when user_model is None
continue
if isinstance(model_spec, str):
k = model_spec
model = PREPARED_MODELS.get(k, None)
if model is None:
injects = PENDING_INJECTS.setdefault(k, [])
injects.append((todo, caller))
#~ d[name] = field
#~ if model_spec == "system.SiteConfig":
# logger.info("20200622 Defer %s for %s", caller, model_spec)
continue
else:
model = model_spec
#~ k = model_spec._meta.app_label + '.' + model_spec.__name__
#~ if model._meta.abstract:
#~ raise Exception("Trying do_when_prepared on abstract model %s" % model)
# logger.info("20200622 Run %s for %s", caller, model)
# from lino.core.model import Model
# Model.django2lino(model)
todo(model)
def when_prepared(*model_specs):
"""
    Decorator to declare a function which will run automatically when
    the specified models have been prepared.
    If a model has already been prepared, the function is executed
    immediately.
"""
def decorator(fn):
return do_when_prepared(fn, *model_specs)
return decorator
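# A minimal usage sketch for when_prepared (the model name 'contacts.Person'
# and the injected field are hypothetical):
#
#     @when_prepared('contacts.Person')
#     def _tweak_person(model):
#         model.add_to_class(
#             'nickname', models.CharField(max_length=50, blank=True))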
def inject_action(model_spec, **actions):
"""
Add the specified action(s) to the specified model.
This can also be used to inject any other class attribute on a model, e.g.
choosers.
"""
def todo(model):
model.define_action(**actions)
return do_when_prepared(todo, model_spec)
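# Usage sketch for inject_action (the model name and action are hypothetical;
# MyAction must be a Lino action instance):
#
#     inject_action('contacts.Person', send_greeting=MyAction())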
def update_model(model_spec, **actions):
"""
Replace the specified attributes in the specified model.
"""
def todo(model):
for k, v in actions.items():
if not hasattr(model, k):
raise Exception(
"%s has no attribute %s to update." % (model, k))
setattr(model, k, v)
    if isinstance(model_spec, type) and issubclass(model_spec, models.Model):
        return todo(model_spec)
return do_when_prepared(todo, model_spec)
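# Usage sketch for update_model (model and attribute names are hypothetical);
# note that it raises if the model lacks the attribute, so typos fail loudly:
#
#     update_model('contacts.Person', quick_search_fields="name nickname")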
def inject_field(model_spec, name, field, doc=None, active=False):
"""Add the given field to the given model.
The following code::
class Foo(dd.Model):
field1 = dd.ForeignKey(...)
dd.inject_field(Foo, 'field2', models.CharField(max_length=20))
is functionally equivalent to this code::
class Foo(dd.Model):
field1 = dd.ForeignKey(Bar)
field2 = models.CharField(max_length=20)
Because `inject_field` is usually called at the global level of
`models modules`, it cannot know whether the given `model_spec`
has already been imported (and its class prepared) or not. That's
why it uses Django's `class_prepared` signal to maintain its own
list of models.
Note that :meth:`inject_field` causes problems when the modified
model has subclasses and is not abstract (i.e., is an MTI parent).
Subclasses will have only some part of the injected field's
definition.
"""
if doc:
field.__doc__ = doc
def todo(model):
# logger.info("20200621 gonna inject_field %s into %s", name, model)
if True: # 20181023
try:
model._meta.get_field(name)
raise Exception("Duplicate field {} on {}".format(
name, model))
except FieldDoesNotExist:
pass
model.add_to_class(name, field)
fix_field_cache(model)
if active:
model.add_active_field(name)
return do_when_prepared(todo, model_spec)
def update_field(model_spec, name, **kw):
"""
Update some attribute of the specified existing field. For example
:class:`Human <lino.mixins.human.Human>` defines a field
`first_name` which may not be blank. If you inherit from this
mixin but want `first_name` to be optional::
class MyPerson(mixins.Human):
...
dd.update_field(MyPerson, 'first_name', blank=True)
Or you want to change the label of a field defined in an inherited
mixin, as done in :mod:`lino_xl.lib.outbox.models`::
dd.update_field(Mail, 'user', verbose_name=_("Sender"))
"""
# if name == "overview":
# if 'verbose_name' in kw:
# if kw['verbose_name'] is None:
# raise Exception("20181022")
def todo(model):
from lino.core import actors
model.collect_virtual_fields()
# if issubclass(model, models.Model):
# collect_virtual_fields(model)
de = model.get_data_elem(name)
if de is None:
msg = "Cannot update unresolved field %s.%s" % (model, name)
raise Exception(msg)
# logger.warning(msg)
# update_data_element(model, name, de, **kw)
# if de.model is not model:
# if issubclass(model, actors.Actor):
# else:
# msg = "20190102 field %s.%s %s" % (model, name, de.model)
# raise Exception(msg)
if isinstance(de, fields.VirtualField):
if issubclass(model, models.Model):
de.attach_to_model(model, name)
model._meta.add_field(de, private=True)
elif issubclass(model, actors.Actor):
if de.model is not model:
# old_rt = de.return_type
de = copy.deepcopy(de)
# de.return_type = copy.deepcopy(de.return_type)
# assert de.return_type is not old_rt
de.model = model
setattr(model, name, de)
# model.add_virtual_field(name, de)
model.virtual_fields[name] = de
# de.model = model
# settings.SITE.register_virtual_field(de)
fld = de.return_type
else:
assert not issubclass(model, actors.Actor)
fld = de
# if kw.get('verbose_name', None) == "Invoice":
# print("20190102 {} {} {}".format(model, de.model, fld.model))
for k, v in kw.items():
setattr(fld, k, v)
# propagate attribs from delegate to virtualfield
if isinstance(de, fields.VirtualField):
settings.SITE.register_virtual_field(de)
# de.lino_resolve_type()
# if name == "overview" and model.__name__ == "Client":
# print("20181023", model, de.verbose_name, fld.verbose_name)
return do_when_prepared(todo, model_spec)
# def update_data_element(model, name, de, **kw):
# return de
def inject_quick_add_buttons(model, name, target):
"""
Injects a virtual display field `name` into the specified `model`.
This field will show up to three buttons
`[New]` `[Show last]` `[Show all]`.
`target` is the table that will run these actions.
It must be a slave of `model`.
"""
def fn(self, ar):
if ar is None:
return ''
return ar.renderer.quick_add_buttons(
ar.spawn(target, master_instance=self))
tm = resolve_model(target.model)
inject_field(model, name,
fields.VirtualField(fields.DisplayField(
tm._meta.verbose_name_plural), fn))
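# Usage sketch for inject_quick_add_buttons (Person and NotesByPerson are
# hypothetical; NotesByPerson must be a slave table of Person):
#
#     inject_quick_add_buttons(Person, 'recent_notes', NotesByPerson)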
def django_patch():
"""Remove duplicate entries in the field cache of models to fix
Django ticket :djangoticket:`10808`.
See :doc:`/dev/diamond`.
"""
check_pending_injects(None, get_models())
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class netbridge_nsip_binding(base_resource) :
""" Binding class showing the nsip that can be bound to netbridge.
"""
def __init__(self) :
self._ipaddress = ""
self._netmask = ""
self._name = ""
self.___count = 0
@property
def name(self) :
ur"""The name of the network bridge.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""The name of the network bridge.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def netmask(self) :
ur"""The network mask for the subnet.
"""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
ur"""The network mask for the subnet.
"""
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def ipaddress(self) :
ur"""The subnet that is extended by this network bridge.<br/>Minimum length = 1.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
ur"""The subnet that is extended by this network bridge.<br/>Minimum length = 1
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(netbridge_nsip_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.netbridge_nsip_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = netbridge_nsip_binding()
updateresource.name = resource.name
updateresource.ipaddress = resource.ipaddress
updateresource.netmask = resource.netmask
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [netbridge_nsip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].ipaddress = resource[i].ipaddress
updateresources[i].netmask = resource[i].netmask
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = netbridge_nsip_binding()
deleteresource.name = resource.name
deleteresource.ipaddress = resource.ipaddress
deleteresource.netmask = resource.netmask
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [netbridge_nsip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].ipaddress = resource[i].ipaddress
deleteresources[i].netmask = resource[i].netmask
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch netbridge_nsip_binding resources.
"""
try :
obj = netbridge_nsip_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of netbridge_nsip_binding resources.
		Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = netbridge_nsip_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count netbridge_nsip_binding resources configued on NetScaler.
"""
try :
obj = netbridge_nsip_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of netbridge_nsip_binding resources.
		Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = netbridge_nsip_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
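# Usage sketch (assumes an authenticated nitro_service instance `client`;
# the bridge name "mybridge" is hypothetical):
#
#     bindings = netbridge_nsip_binding.get(client, "mybridge")
#     total = netbridge_nsip_binding.count(client, "mybridge")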
class netbridge_nsip_binding_response(base_response) :
def __init__(self, length=1) :
self.netbridge_nsip_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.netbridge_nsip_binding = [netbridge_nsip_binding() for _ in range(length)]
|
|
# See file COPYING distributed with sjs for copyright and license.
import os
import sys
import traceback
import signal
import string
import datetime
import time
import subprocess
import sqlite3
class SJSError(Exception):
"""base class for ssggee errors
derived classes should define __str__ in a way that is appropriate
to print as an error for the user
"""
class SJSROOTError(SJSError):
"""SJS_ROOT undefined or not a directory"""
def __init__(self, error):
SJSError.__init__(self)
self.error = error
def __str__(self):
return 'SJS_ROOT is %s' % self.error
class Cluster:
    def __init__(self):
        if 'SJS_ROOT' not in os.environ:
            raise SJSROOTError('not defined')
        if not os.path.isdir(os.environ['SJS_ROOT']):
            raise SJSROOTError('not a directory')
        self.sjs_root = os.path.abspath(os.environ['SJS_ROOT'])
slots_fname = os.path.join(self.sjs_root, 'slots')
slots = open(slots_fname).read().strip()
self.slots = int(slots)
assert self.slots > 0
return
def log(self, msg):
t = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
fo = open(os.path.join(self.sjs_root, 'log'), 'a')
fo.write('%s %s\n' % (t, msg))
fo.close()
return
def submit(self, name, shell, script, script_args, stdout, stderr):
db = sqlite3.connect(os.path.join(self.sjs_root, 'db.sqlite'))
insert_query = """INSERT INTO job (user,
name,
shell,
script,
error_flag,
stdout,
stderr,
t_submit)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
arg_query = """INSERT INTO argument (job_id, ordinal, argument)
VALUES (?, ?, ?)"""
try:
c = db.cursor()
query_params = (os.getlogin(),
name,
shell,
'',
False,
stdout,
stderr,
int(time.time()))
c.execute(insert_query, query_params)
c.execute("SELECT LAST_INSERT_ROWID()")
rowid = c.fetchone()[0]
c.execute("SELECT id FROM job where rowid = ?", (rowid, ))
job_id = c.fetchone()[0]
script_fname = os.path.join(self.sjs_root, 'spool', str(job_id))
open(script_fname, 'w').write(script)
c.execute("UPDATE job SET script = ? WHERE id = ?",
(script_fname, job_id))
for (i, arg) in enumerate(script_args):
c.execute(arg_query, (job_id, i+1, arg))
c.close()
except:
db.rollback()
raise
else:
db.commit()
db.close()
self.log('queued job %d (%s)' % (job_id, name))
return self.get_job(job_id)
def get_job(self, job_id):
return _Job(self, job_id)
def get_all_jobs(self):
db = sqlite3.connect(os.path.join(self.sjs_root, 'db.sqlite'))
c = db.cursor()
c.execute("SELECT id FROM job ORDER BY id")
job_ids = [ row[0] for row in c ]
jobs = [ self.get_job(job_id) for job_id in job_ids ]
return jobs
def get_queued_jobs(self):
jobs = []
for job in self.get_all_jobs():
if job.error_flag:
continue
if job.is_running():
continue
jobs.append(job)
return jobs
def get_running_jobs(self):
jobs = []
for job in self.get_all_jobs():
if job.is_running():
jobs.append(job)
return jobs
def start_runner(self):
if os.fork():
return
os.setsid()
os.chdir(os.path.expanduser('~'))
os.close(0)
os.close(1)
os.close(2)
        # Reopen fds 0, 1 and 2: stdin from /dev/null, stdout and stderr
        # appended to log files (os.open() hands back the lowest free fds).
        os.open('/dev/null', os.O_RDONLY)
        os.open(os.path.join(self.sjs_root, 'qrun.stdout'),
                os.O_WRONLY | os.O_APPEND)
        os.open(os.path.join(self.sjs_root, 'qrun.stderr'),
                os.O_WRONLY | os.O_APPEND)
self.log('qrunner (%d) started' % os.getpid())
while True:
if len(self.get_running_jobs()) >= self.slots:
self.log('qrunner (%d) all slots filled' % os.getpid())
break
jobs = self.get_queued_jobs()
if not jobs:
self.log('qrunner (%d) no queued jobs' % os.getpid())
break
job = jobs[0]
try:
job.run()
except Exception, data:
self.log('qrunner (%d) error running job %d' % (os.getpid(),
job.id))
self.log('qrunner (%d) %s' % (os.getpid(), str(data)))
err_info = traceback.extract_tb(sys.exc_info()[-1])
self.log('qrunner (%d) %s line %d' % (os.getpid(),
err_info[-1][0],
err_info[-1][1]))
self.log('qrunner (%d) done' % os.getpid())
sys.exit(0)
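# Usage sketch (assumes SJS_ROOT points at an initialized spool directory with
# the sqlite schema in place; the job name, script and paths are hypothetical):
#
#     cluster = Cluster()
#     job = cluster.submit('demo', '/bin/sh', 'echo hello', [],
#                          'demo.$JOB_ID.out', 'demo.$JOB_ID.err')
#     cluster.start_runner()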
class _Job:
def __init__(self, cluster, job_id):
self.cluster = cluster
db = sqlite3.connect(os.path.join(cluster.sjs_root, 'db.sqlite'))
c = db.cursor()
c.execute("SELECT * FROM job WHERE id = ?", (job_id, ))
row = c.fetchone()
if not row:
raise KeyError('job %d not found' % job_id)
cols = [ el[0] for el in c.description ]
row_dict = dict(zip(cols, row))
self.id = row_dict['id']
self.user = row_dict['user']
self.name = row_dict['name']
self.shell = row_dict['shell']
self.script = row_dict['script']
self.error_flag = bool(row_dict['error_flag'])
self.stdout = row_dict['stdout']
self.stderr = row_dict['stderr']
self.pid = row_dict['pid']
if row_dict['t_submit'] is None:
self.t_submit = None
else:
t_sub = row_dict['t_submit']
self.t_submit = datetime.datetime.fromtimestamp(t_sub)
if row_dict['t_start'] is None:
self.t_start = None
else:
self.t_start = datetime.datetime.fromtimestamp(row_dict['t_start'])
query = """SELECT argument
FROM argument
WHERE job_id = ?
ORDER BY ordinal"""
c.execute(query, (job_id, ))
self.args = [ row[0] for row in c.fetchall() ]
c.close()
db.close()
return
def run(self):
try:
args = [self.shell, self.script]
args.extend(self.args)
environ = dict(os.environ)
environ['JOB_ID'] = str(self.id)
environ['JOB_NAME'] = self.name
stdin = open('/dev/null')
stdout_path = string.Template(self.stdout).safe_substitute(environ)
stderr_path = string.Template(self.stderr).safe_substitute(environ)
stdout = open(stdout_path, 'w')
stderr = open(stderr_path, 'w')
po = subprocess.Popen(args,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=environ)
t = int(time.time())
db = sqlite3.connect(os.path.join(self.cluster.sjs_root,
'db.sqlite'))
try:
c = db.cursor()
c.execute("UPDATE job SET pid = ?, t_start = ? WHERE id = ?",
(po.pid, t, self.id))
c.close()
except:
db.rollback()
raise
else:
db.commit()
db.close()
self.pid = po.pid
self.t_start = datetime.datetime.fromtimestamp(t)
except:
self.set_error()
raise
fmt = 'qrunner (%d) started job %d (PID %d)'
self.cluster.log(fmt % (os.getpid(), self.id, po.pid))
po.wait()
stdin.close()
stdout.close()
stderr.close()
self.clean()
fmt = 'qrunner (%d) job %d finished'
self.cluster.log(fmt % (os.getpid(), self.id))
return
def set_error(self):
db = sqlite3.connect(os.path.join(self.cluster.sjs_root, 'db.sqlite'))
try:
c = db.cursor()
c.execute("UPDATE job SET error_flag = ? WHERE id = ?",
(True, self.id))
c.close()
except:
db.rollback()
raise
else:
db.commit()
db.close()
self.error_flag = True
return
def is_running(self):
return self.t_start is not None
def clean(self):
self.cluster.log('deleting job %d' % self.id)
os.remove(self.script)
db = sqlite3.connect(os.path.join(self.cluster.sjs_root, 'db.sqlite'))
try:
c = db.cursor()
c.execute("DELETE FROM job WHERE id = ?", (self.id, ))
c.close()
except:
db.rollback()
raise
else:
db.commit()
db.close()
return
def delete(self):
if self.is_running():
self.cluster.log('kill %d to end job %d' % (self.pid, self.id))
os.kill(self.pid, signal.SIGKILL)
self.clean()
return
# eof
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import gc
import StringIO, sys, types
from twisted.trial import unittest, runner
from twisted.scripts import trial
from twisted.python import util, deprecate, versions
from twisted.python.compat import set
from twisted.python.filepath import FilePath
from twisted.trial.test.test_loader import testNames
pyunit = __import__('unittest')
def sibpath(filename):
"""For finding files in twisted/trial/test"""
return util.sibpath(__file__, filename)
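# For example, sibpath('moduletest.py') resolves to the moduletest.py that
# lives next to this test module, wherever twisted/trial/test is installed.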
class ForceGarbageCollection(unittest.TestCase):
"""
Tests for the --force-gc option.
"""
def setUp(self):
self.config = trial.Options()
self.log = []
self.patch(gc, 'collect', self.collect)
test = pyunit.FunctionTestCase(self.simpleTest)
self.test = runner.TestSuite([test, test])
def simpleTest(self):
"""
A simple test method that records that it was run.
"""
self.log.append('test')
def collect(self):
"""
A replacement for gc.collect that logs calls to itself.
"""
self.log.append('collect')
def makeRunner(self):
"""
Return a L{runner.TrialRunner} object that is safe to use in tests.
"""
runner = trial._makeRunner(self.config)
runner.stream = StringIO.StringIO()
return runner
def test_forceGc(self):
"""
Passing the --force-gc option to the trial script forces the garbage
collector to run before and after each test.
"""
self.config['force-gc'] = True
self.config.postOptions()
runner = self.makeRunner()
runner.run(self.test)
self.assertEqual(self.log, ['collect', 'test', 'collect',
'collect', 'test', 'collect'])
def test_unforceGc(self):
"""
By default, no garbage collection is forced.
"""
self.config.postOptions()
runner = self.makeRunner()
runner.run(self.test)
self.assertEqual(self.log, ['test', 'test'])
class TestSuiteUsed(unittest.TestCase):
"""
Check the category of tests suite used by the loader.
"""
def setUp(self):
"""
Create a trial configuration object.
"""
self.config = trial.Options()
def test_defaultSuite(self):
"""
By default, the loader should use L{runner.DestructiveTestSuite}
"""
loader = trial._getLoader(self.config)
self.assertEqual(loader.suiteFactory, runner.DestructiveTestSuite)
def test_untilFailureSuite(self):
"""
The C{until-failure} configuration uses the L{runner.TestSuite} to keep
instances alive across runs.
"""
self.config['until-failure'] = True
loader = trial._getLoader(self.config)
self.assertEqual(loader.suiteFactory, runner.TestSuite)
class TestModuleTest(unittest.TestCase):
def setUp(self):
self.config = trial.Options()
def tearDown(self):
self.config = None
def test_testNames(self):
"""
Check that the testNames helper method accurately collects the
        names of tests in a suite.
"""
self.assertEqual(testNames(self), [self.id()])
def assertSuitesEqual(self, test1, names):
loader = runner.TestLoader()
names1 = testNames(test1)
names2 = testNames(runner.TestSuite(map(loader.loadByName, names)))
names1.sort()
names2.sort()
self.assertEqual(names1, names2)
def test_baseState(self):
self.assertEqual(0, len(self.config['tests']))
def test_testmoduleOnModule(self):
"""
Check that --testmodule loads a suite which contains the tests
referred to in test-case-name inside its parameter.
"""
self.config.opt_testmodule(sibpath('moduletest.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor'])
def test_testmoduleTwice(self):
"""
When the same module is specified with two --testmodule flags, it
should only appear once in the suite.
"""
self.config.opt_testmodule(sibpath('moduletest.py'))
self.config.opt_testmodule(sibpath('moduletest.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor'])
def test_testmoduleOnSourceAndTarget(self):
"""
If --testmodule is specified twice, once for module A and once for
a module which refers to module A, then make sure module A is only
added once.
"""
self.config.opt_testmodule(sibpath('moduletest.py'))
self.config.opt_testmodule(sibpath('test_test_visitor.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor'])
def test_testmoduleOnSelfModule(self):
"""
When given a module that refers to *itself* in the test-case-name
variable, check that --testmodule only adds the tests once.
"""
self.config.opt_testmodule(sibpath('moduleself.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.moduleself'])
def test_testmoduleOnScript(self):
"""
Check that --testmodule loads tests referred to in test-case-name
buffer variables.
"""
self.config.opt_testmodule(sibpath('scripttest.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor',
'twisted.trial.test.test_class'])
def test_testmoduleOnNonexistentFile(self):
"""
Check that --testmodule displays a meaningful error message when
passed a non-existent filename.
"""
buffy = StringIO.StringIO()
stderr, sys.stderr = sys.stderr, buffy
filename = 'test_thisbetternoteverexist.py'
try:
self.config.opt_testmodule(filename)
self.assertEqual(0, len(self.config['tests']))
self.assertEqual("File %r doesn't exist\n" % (filename,),
buffy.getvalue())
finally:
sys.stderr = stderr
def test_testmoduleOnEmptyVars(self):
"""
Check that --testmodule adds no tests to the suite for modules
which lack test-case-name buffer variables.
"""
self.config.opt_testmodule(sibpath('novars.py'))
self.assertEqual(0, len(self.config['tests']))
def test_testmoduleOnModuleName(self):
"""
Check that --testmodule does *not* support module names as arguments
and that it displays a meaningful error message.
"""
buffy = StringIO.StringIO()
stderr, sys.stderr = sys.stderr, buffy
moduleName = 'twisted.trial.test.test_script'
try:
self.config.opt_testmodule(moduleName)
self.assertEqual(0, len(self.config['tests']))
self.assertEqual("File %r doesn't exist\n" % (moduleName,),
buffy.getvalue())
finally:
sys.stderr = stderr
def test_parseLocalVariable(self):
declaration = '-*- test-case-name: twisted.trial.test.test_tests -*-'
localVars = trial._parseLocalVariables(declaration)
self.assertEqual({'test-case-name':
'twisted.trial.test.test_tests'},
localVars)
def test_trailingSemicolon(self):
declaration = '-*- test-case-name: twisted.trial.test.test_tests; -*-'
localVars = trial._parseLocalVariables(declaration)
self.assertEqual({'test-case-name':
'twisted.trial.test.test_tests'},
localVars)
def test_parseLocalVariables(self):
declaration = ('-*- test-case-name: twisted.trial.test.test_tests; '
'foo: bar -*-')
localVars = trial._parseLocalVariables(declaration)
self.assertEqual({'test-case-name':
'twisted.trial.test.test_tests',
'foo': 'bar'},
localVars)
def test_surroundingGuff(self):
declaration = ('## -*- test-case-name: '
'twisted.trial.test.test_tests -*- #')
localVars = trial._parseLocalVariables(declaration)
self.assertEqual({'test-case-name':
'twisted.trial.test.test_tests'},
localVars)
def test_invalidLine(self):
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'foo')
def test_invalidDeclaration(self):
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'-*- foo -*-')
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'-*- foo: bar; qux -*-')
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'-*- foo: bar: baz; qux: qax -*-')
def test_variablesFromFile(self):
localVars = trial.loadLocalVariables(sibpath('moduletest.py'))
self.assertEqual({'test-case-name':
'twisted.trial.test.test_test_visitor'},
localVars)
def test_noVariablesInFile(self):
localVars = trial.loadLocalVariables(sibpath('novars.py'))
self.assertEqual({}, localVars)
def test_variablesFromScript(self):
localVars = trial.loadLocalVariables(sibpath('scripttest.py'))
self.assertEqual(
{'test-case-name': ('twisted.trial.test.test_test_visitor,'
'twisted.trial.test.test_class')},
localVars)
def test_getTestModules(self):
modules = trial.getTestModules(sibpath('moduletest.py'))
self.assertEqual(modules, ['twisted.trial.test.test_test_visitor'])
def test_getTestModules_noVars(self):
modules = trial.getTestModules(sibpath('novars.py'))
self.assertEqual(len(modules), 0)
def test_getTestModules_multiple(self):
modules = trial.getTestModules(sibpath('scripttest.py'))
self.assertEqual(set(modules),
set(['twisted.trial.test.test_test_visitor',
'twisted.trial.test.test_class']))
def test_looksLikeTestModule(self):
for filename in ['test_script.py', 'twisted/trial/test/test_script.py']:
self.failUnless(trial.isTestFile(filename),
"%r should be a test file" % (filename,))
for filename in ['twisted/trial/test/moduletest.py',
sibpath('scripttest.py'), sibpath('test_foo.bat')]:
self.failIf(trial.isTestFile(filename),
"%r should *not* be a test file" % (filename,))
class WithoutModuleTests(unittest.TestCase):
"""
Test the C{without-module} flag.
"""
def setUp(self):
"""
Create a L{trial.Options} object to be used in the tests, and save
C{sys.modules}.
"""
self.config = trial.Options()
self.savedModules = dict(sys.modules)
def tearDown(self):
"""
Restore C{sys.modules}.
"""
for module in ('imaplib', 'smtplib'):
if module in self.savedModules:
sys.modules[module] = self.savedModules[module]
else:
sys.modules.pop(module, None)
def _checkSMTP(self):
"""
Try to import the C{smtplib} module, and return it.
"""
import smtplib
return smtplib
def _checkIMAP(self):
"""
Try to import the C{imaplib} module, and return it.
"""
import imaplib
return imaplib
def test_disableOneModule(self):
"""
Check that after disabling a module, it can't be imported anymore.
"""
self.config.parseOptions(["--without-module", "smtplib"])
self.assertRaises(ImportError, self._checkSMTP)
# Restore sys.modules
del sys.modules["smtplib"]
# Then the function should succeed
self.assertIsInstance(self._checkSMTP(), types.ModuleType)
def test_disableMultipleModules(self):
"""
Check that several modules can be disabled at once.
"""
self.config.parseOptions(["--without-module", "smtplib,imaplib"])
self.assertRaises(ImportError, self._checkSMTP)
self.assertRaises(ImportError, self._checkIMAP)
# Restore sys.modules
del sys.modules["smtplib"]
del sys.modules["imaplib"]
# Then the functions should succeed
self.assertIsInstance(self._checkSMTP(), types.ModuleType)
self.assertIsInstance(self._checkIMAP(), types.ModuleType)
def test_disableAlreadyImportedModule(self):
"""
Disabling an already imported module should produce a warning.
"""
self.assertIsInstance(self._checkSMTP(), types.ModuleType)
self.assertWarns(RuntimeWarning,
"Module 'smtplib' already imported, disabling anyway.",
trial.__file__,
self.config.parseOptions, ["--without-module", "smtplib"])
self.assertRaises(ImportError, self._checkSMTP)
class CoverageTests(unittest.TestCase):
"""
Tests for the I{coverage} option.
"""
if getattr(sys, 'gettrace', None) is None:
skip = (
"Cannot test trace hook installation without inspection API.")
def setUp(self):
"""
Arrange for the current trace hook to be restored when the
test is complete.
"""
self.addCleanup(sys.settrace, sys.gettrace())
def test_tracerInstalled(self):
"""
L{trial.Options} handles C{"--coverage"} by installing a trace
hook to record coverage information.
"""
options = trial.Options()
options.parseOptions(["--coverage"])
self.assertEqual(sys.gettrace(), options.tracer.globaltrace)
def test_coverdirDefault(self):
"""
L{trial.Options.coverdir} returns a L{FilePath} based on the default
for the I{temp-directory} option if that option is not specified.
"""
options = trial.Options()
self.assertEqual(
options.coverdir(),
FilePath(".").descendant([options["temp-directory"], "coverage"]))
def test_coverdirOverridden(self):
"""
If a value is specified for the I{temp-directory} option,
L{trial.Options.coverdir} returns a child of that path.
"""
path = self.mktemp()
options = trial.Options()
options.parseOptions(["--temp-directory", path])
self.assertEqual(
options.coverdir(), FilePath(path).child("coverage"))
class ExtraTests(unittest.TestCase):
"""
Tests for the I{extra} option.
"""
def setUp(self):
self.config = trial.Options()
def tearDown(self):
self.config = None
def assertDeprecationWarning(self, deprecatedCallable, warnings):
"""
Check for a deprecation warning
"""
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(warnings[0]['message'],
deprecate.getDeprecationWarningString(
deprecatedCallable, versions.Version('Twisted', 11, 0, 0)))
def test_extraDeprecation(self):
"""
Check that --extra will emit a deprecation warning
"""
self.config.opt_extra('some.sample.test')
self.assertDeprecationWarning(self.config.opt_extra,
self.flushWarnings([self.test_extraDeprecation]))
def test_xDeprecation(self):
"""
Check that -x will emit a deprecation warning
"""
self.config.opt_x('some.sample.text')
self.assertDeprecationWarning(self.config.opt_extra,
self.flushWarnings([self.test_xDeprecation]))
|
|
#!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through heapcheck_test.py.
Most of this code is copied from ../valgrind/chrome_tests.py.
TODO(glider): put common functions to a standalone module.
'''
import glob
import logging
import optparse
import os
import stat
import sys
import google.logging_utils
import google.path_utils
# Import the platform_utils up in the layout tests which have been modified to
# work under non-Windows platforms instead of the ones that are in the
# tools/python/google directory. (See chrome_tests.sh which sets PYTHONPATH
# correctly.)
#
# TODO(erg): Copy/Move the relevant functions from the layout_package version
# of platform_utils back up to google.platform_utils
# package. http://crbug.com/6164
import layout_package.path_utils
import common
import heapcheck_test
class TestNotFound(Exception): pass
def Dir2IsNewer(dir1, dir2):
if dir2 is None or not os.path.isdir(dir2):
return False
if dir1 is None or not os.path.isdir(dir1):
return True
return os.stat(dir2)[stat.ST_MTIME] > os.stat(dir1)[stat.ST_MTIME]
def FindNewestDir(dirs):
newest_dir = None
for dir in dirs:
if Dir2IsNewer(newest_dir, dir):
newest_dir = dir
return newest_dir
def File2IsNewer(file1, file2):
if file2 is None or not os.path.isfile(file2):
return False
if file1 is None or not os.path.isfile(file1):
return True
return os.stat(file2)[stat.ST_MTIME] > os.stat(file1)[stat.ST_MTIME]
def FindDirContainingNewestFile(dirs, file):
"""Searches for the directory containing the newest copy of |file|.
Args:
dirs: A list of paths to the directories to search among.
file: A string containing the file name to search.
Returns:
    The string representing the directory containing the newest copy of
|file|.
Raises:
IOError: |file| was not found.
"""
newest_dir = None
newest_file = None
for dir in dirs:
the_file = os.path.join(dir, file)
if File2IsNewer(newest_file, the_file):
newest_dir = dir
newest_file = the_file
if newest_dir is None:
raise IOError("cannot find file %s anywhere, have you built it?" % file)
return newest_dir
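# Usage sketch (directory names are hypothetical): picks whichever build
# output directory holds the most recently built base_unittests binary.
#
#   build_dir = FindDirContainingNewestFile(
#       ["out/Debug", "sconsbuild/Debug"], "base_unittests")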
class ChromeTests(object):
'''This class is derived from the chrome_tests.py file in ../purify/.
'''
def __init__(self, options, args, test):
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
self._test_list = {
"base": self.TestBase, "base_unittests": self.TestBase,
"browser": self.TestBrowser, "browser_tests": self.TestBrowser,
"googleurl": self.TestGURL, "googleurl_unittests": self.TestGURL,
"ipc": self.TestIpc, "ipc_tests": self.TestIpc,
"layout": self.TestLayout, "layout_tests": self.TestLayout,
"media": self.TestMedia, "media_unittests": self.TestMedia,
"net": self.TestNet, "net_unittests": self.TestNet,
"printing": self.TestPrinting, "printing_unittests": self.TestPrinting,
"startup": self.TestStartup, "startup_tests": self.TestStartup,
"test_shell": self.TestTestShell, "test_shell_tests": self.TestTestShell,
"ui": self.TestUI, "ui_tests": self.TestUI,
"unit": self.TestUnit, "unit_tests": self.TestUnit,
"app": self.TestApp, "app_unittests": self.TestApp,
}
if test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
self._options = options
self._args = args
self._test = test
script_dir = google.path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/heapcheck/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# Since this path is used for string matching, make sure it's always
# an absolute Windows-style path.
self._source_dir = layout_package.path_utils.GetAbsolutePath(
self._source_dir)
heapcheck_test_script = os.path.join(script_dir, "heapcheck_test.py")
self._command_preamble = [heapcheck_test_script]
def _DefaultCommand(self, module, exe=None, heapcheck_test_args=None):
'''Generates the default command array that most tests will use.
Args:
module: The module name (corresponds to the dir in src/ where the test
data resides).
exe: The executable name.
heapcheck_test_args: additional arguments to append to the command line.
Returns:
A string with the command to run the test.
'''
module_dir = os.path.join(self._source_dir, module)
# We need multiple data dirs, the current script directory and a module
# specific one. The global suppression file lives in our directory, and the
# module specific suppression file lives with the module.
self._data_dirs = [google.path_utils.ScriptDir()]
if module == "chrome":
# Unfortunately, not all modules have the same directory structure.
self._data_dirs.append(os.path.join(module_dir, "test", "data",
"heapcheck"))
else:
self._data_dirs.append(os.path.join(module_dir, "data", "heapcheck"))
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "sconsbuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
]
if exe:
self._options.build_dir = FindDirContainingNewestFile(dirs, exe)
else:
self._options.build_dir = FindNewestDir(dirs)
cmd = list(self._command_preamble)
if heapcheck_test_args != None:
for arg in heapcheck_test_args:
cmd.append(arg)
if exe:
cmd.append(os.path.join(self._options.build_dir, exe))
    # Heapcheck runs tests slowly, so slow tests hurt more; show elapsed time
# so we can find the slowpokes.
cmd.append("--gtest_print_time")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
return cmd
def Suppressions(self):
'''Builds the list of available suppressions files.'''
ret = []
for directory in self._data_dirs:
suppression_file = os.path.join(directory, "suppressions.txt")
if os.path.exists(suppression_file):
ret.append(suppression_file)
suppression_file = os.path.join(directory, "suppressions_linux.txt")
if os.path.exists(suppression_file):
ret.append(suppression_file)
return ret
def Run(self):
'''Runs the test specified by command-line argument --test.'''
logging.info("running test %s" % (self._test))
return self._test_list[self._test]()
def _ReadGtestFilterFile(self, name, cmd):
'''Reads files which contain lists of tests to filter out with --gtest_filter
and appends the command-line option to |cmd|.
Args:
name: the test executable name.
cmd: the test running command line to be modified.
'''
filters = []
for directory in self._data_dirs:
gtest_filter_files = [
os.path.join(directory, name + ".gtest.txt"),
os.path.join(directory, name + ".gtest-heapcheck.txt"),
os.path.join(directory, name + ".gtest_linux.txt")]
for filename in gtest_filter_files:
if os.path.exists(filename):
logging.info("reading gtest filters from %s" % filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
filters.append(line)
gtest_filter = self._options.gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
def SimpleTest(self, module, name, heapcheck_test_args=None, cmd_args=None):
'''Builds the command line and runs the specified test.
Args:
module: The module name (corresponds to the dir in src/ where the test
data resides).
name: The executable name.
heapcheck_test_args: Additional command line args for heap checker.
cmd_args: Additional command line args for the test.
'''
cmd = self._DefaultCommand(module, name, heapcheck_test_args)
supp = self.Suppressions()
self._ReadGtestFilterFile(name, cmd)
if cmd_args:
cmd.extend(["--"])
cmd.extend(cmd_args)
# Sets LD_LIBRARY_PATH to the build folder so external libraries can be
# loaded.
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
return heapcheck_test.RunTool(cmd, supp)
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestBrowser(self):
return self.SimpleTest("chrome", "browser_tests")
def TestGURL(self):
return self.SimpleTest("chrome", "googleurl_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestStartup(self):
# We don't need the performance results, we're just looking for pointer
# errors, so set number of iterations down to the minimum.
os.putenv("STARTUP_TESTS_NUMCYCLES", "1")
logging.info("export STARTUP_TESTS_NUMCYCLES=1");
return self.SimpleTest("chrome", "startup_tests")
def TestTestShell(self):
return self.SimpleTest("webkit", "test_shell_tests")
def TestUnit(self):
return self.SimpleTest("chrome", "unit_tests")
def TestApp(self):
return self.SimpleTest("chrome", "app_unittests")
def TestUI(self):
return self.SimpleTest("chrome", "ui_tests",
cmd_args=[
"--ui-test-timeout=120000",
"--ui-test-action-timeout=80000",
"--ui-test-action-max-timeout=180000",
"--ui-test-terminate-timeout=60000"])
def TestLayoutChunk(self, chunk_num, chunk_size):
'''Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size).
Wrap around to beginning of list at end. If chunk_size is zero, run all
tests in the list once. If a text file is given as argument, it is used as
the list of tests.
'''
# Build the ginormous commandline in 'cmd'.
# It's going to be roughly
# python heapcheck_test.py ... python run_webkit_tests.py ...
# but we'll use the --indirect flag to heapcheck_test.py
# to avoid heapchecking python.
# Start by building the heapcheck_test.py commandline.
cmd = self._DefaultCommand("webkit")
# Now build script_cmd, the run_webkits_tests.py commandline
# Store each chunk in its own directory so that we can find the data later
chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
test_shell = os.path.join(self._options.build_dir, "test_shell")
out_dir = os.path.join(google.path_utils.ScriptDir(), "latest")
out_dir = os.path.join(out_dir, chunk_dir)
if os.path.exists(out_dir):
old_files = glob.glob(os.path.join(out_dir, "*.txt"))
for f in old_files:
os.remove(f)
else:
os.makedirs(out_dir)
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
"run_webkit_tests.py")
script_cmd = ["python", script, "--run-singly", "-v",
"--noshow-results", "--time-out-ms=200000",
"--nocheck-sys-deps"]
# Pass build mode to run_webkit_tests.py. We aren't passed it directly,
# so parse it out of build_dir. run_webkit_tests.py can only handle
# the two values "Release" and "Debug".
# TODO(Hercules): unify how all our scripts pass around build mode
# (--mode / --target / --build_dir / --debug)
if self._options.build_dir.endswith("Debug"):
script_cmd.append("--debug");
if (chunk_size > 0):
script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
self._ReadGtestFilterFile("layout", script_cmd)
# Now run script_cmd with the wrapper in cmd
cmd.extend(["--"])
cmd.extend(script_cmd)
supp = self.Suppressions()
return heapcheck_test.RunTool(cmd, supp)
def TestLayout(self):
'''Runs the layout tests.'''
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under purify rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if (chunk_size == 0):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("heapcheck_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
str = f.read()
if len(str):
chunk_num = int(str)
          # This should be enough so that we have a couple of complete runs
          # of test data stored in the archive (although note that when we
          # loop, we are almost guaranteed not to be at the end of the list)
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
ret = self.TestLayoutChunk(chunk_num, chunk_size)
# Wait until after the test runs to completion to write out the new chunk
# number. This way, if the bot is killed, we'll start running again from
# the current chunk rather than skipping it.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return ret
def _main(_):
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.disable_interspersed_args()
parser.add_option("-b", "--build_dir",
help="the location of the output of the compiler output")
parser.add_option("-t", "--test", action="append",
help="which test to run")
parser.add_option("", "--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("", "--gtest_repeat",
help="argument for --gtest_repeat")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
# My machine can do about 120 layout tests/hour in release mode.
# Let's do 30 minutes worth per run.
# The CPU is mostly idle, so perhaps we can raise this when
# we figure out how to run them more efficiently.
parser.add_option("-n", "--num_tests", default=60, type="int",
help="for layout tests: # of subtests per run. 0 for all.")
options, args = parser.parse_args()
if options.verbose:
google.logging_utils.config_root(logging.DEBUG)
else:
google.logging_utils.config_root()
if not options.test or not len(options.test):
parser.error("--test not specified")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret:
return ret
return 0
if __name__ == "__main__":
if sys.platform == 'linux2':
ret = _main(sys.argv)
else:
logging.error("Heap checking works only on Linux at the moment.")
ret = 1
sys.exit(ret)
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from numbers import Number
from collections.abc import Sequence
from functools import wraps
import numpy as np
from . import _typing as T
from .core import (Unit, UnitBase, UnitsError,
add_enabled_equivalencies, dimensionless_unscaled)
from .function.core import FunctionUnitBase
from .physical import PhysicalType, get_physical_type
from .quantity import Quantity
from .structured import StructuredUnit
NoneType = type(None)
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
unit = get_physical_type(target)._unit
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise ValueError(f"Invalid unit or physical type {target!r}.") from None
allowed_units.append(unit)
return allowed_units
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies,
strict_dimensionless=False):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
# If dimensionless is an allowed unit and the argument is unit-less,
# allow numbers or numpy arrays with numeric dtypes
if (dimensionless_unscaled in allowed_units and not strict_dimensionless
and not hasattr(arg, "unit")):
if isinstance(arg, Number):
return
elif (isinstance(arg, np.ndarray)
and np.issubdtype(arg.dtype, np.number)):
return
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = ("a 'unit' attribute without an 'is_equivalent' method")
else:
error_msg = "no 'unit' attribute"
raise TypeError(f"Argument '{param_name}' to function '{func_name}'"
f" has {error_msg}. You should pass in an astropy "
"Quantity instead.")
else:
error_msg = (f"Argument '{param_name}' to function '{func_name}' must "
"be in units convertible to")
if len(targets) > 1:
targ_names = ", ".join([f"'{str(targ)}'" for targ in targets])
raise UnitsError(f"{error_msg} one of: {targ_names}.")
else:
raise UnitsError(f"{error_msg} '{str(targets[0])}'.")
def _parse_annotation(target):
if target in (None, NoneType, inspect._empty):
return target
# check if unit-like
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
ptype = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
if isinstance(target, str):
raise ValueError(f"invalid unit or physical type {target!r}.") from None
else:
return ptype
else:
return unit
# could be a type hint
origin = T.get_origin(target)
if origin is T.Union:
return [_parse_annotation(t) for t in T.get_args(target)]
elif origin is not T.Annotated: # can't be Quantity[]
return False
# parse type hint
cls, *annotations = T.get_args(target)
if not issubclass(cls, Quantity) or not annotations:
return False
# get unit from type hint
unit, *rest = annotations
if not isinstance(unit, (UnitBase, PhysicalType)):
return False
return unit
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
in the annotation. If the argument has no unit attribute, i.e. it is not
a Quantity object, a `ValueError` will be raised unless the argument is
        an annotation. This is to allow non-Quantity annotations to pass
through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
        The original function is accessible via the attribute ``__wrapped__``.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (param.name not in bound_args.arguments
and param.default is not param.empty):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if (isinstance(targets, str)
or not isinstance(targets, Sequence)):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [t for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(param.name, wrapped_function.__name__,
arg, valid_targets, self.equivalencies,
self.strict_dimensionless)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (ra if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra))
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [t for t in target
if isinstance(t, (str, UnitBase, PhysicalType))]
_validate_arg_value("return", wrapped_function.__name__,
return_, valid_targets, self.equivalencies,
self.strict_dimensionless)
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
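# A minimal sketch of the None-default passthrough handled in the wrapper above
# (assuming ``import astropy.units as u``):
#
#   @u.quantity_input
#   def myfunction(distance: u.m = None):
#       return distance
#
#   myfunction()          # returns None: a None argument with a None default is not checked
#   myfunction(3 * u.km)  # returns 3 km: km is convertible to m, so validation passes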
|
|
#!/usr/bin/env python
"""
Since one might not only be interested in the individual (hyper-)parameters of a bayesloop study, but also in arbitrary
arithmetic combinations of one or more (hyper-)parameters, a parser is needed to compute probability values or
distributions for those derived parameters.
"""
from __future__ import print_function, division
import pyparsing as pp
import re
import operator
import numpy as np
import scipy.special as sp
from tqdm.auto import tqdm
from .exceptions import ConfigurationError
class Parameter(np.ndarray):
"""
Behaves like a Numpy array, but features additional attributes. This allows us to apply arithmetic operations to
the grid of parameter values while keeping track of the corresponding probability grid and the parameter's origin.
"""
def __new__(cls, values, prob, name=None, time=None, study=None):
obj = np.asarray(values).view(cls)
obj.prob = prob
obj.name = name
obj.time = time
obj.study = study
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None:
return
self.prob = getattr(obj, 'prob', None)
self.name = getattr(obj, 'name', None)
self.time = getattr(obj, 'time', None)
self.study = getattr(obj, 'study', None)
class HyperParameter(Parameter):
"""
Behaves like a Numpy array, but features additional attributes. This allows us to apply arithmetic operations to
the grid of hyper-parameter values while keeping track of the corresponding probability grid and the
hyper-parameter's origin.
"""
pass
class Parser:
"""
Computes derived probability values and distributions based on arithmetic operations of (hyper-)parameters.
Args:
studies: One or more bayesloop study instances. All (hyper-)parameters in the specified study object(s) will be
available to the parser.
Example:
::
S = bl.Study()
...
P = bl.Parser(S)
P('sqrt(rate@1910) > 1.')
"""
def __init__(self, *studies):
# import all parameter names
self.studies = studies
if len(self.studies) == 0:
raise ConfigurationError('Parser instance takes at least one Study instance as argument.')
self.names = []
for study in studies:
self.names.extend(study.observationModel.parameterNames)
try:
# OnlineStudy: loop over all transition models
for names in study.hyperParameterNames:
self.names.extend(names)
except AttributeError:
try:
# Hyper/ChangepointStudy: only one transition model
self.names.extend(study.flatHyperParameterNames)
except AttributeError:
pass
        if len(np.unique(self.names)) != len(self.names):
raise ConfigurationError('Specified study objects contain duplicate parameter names.')
# define arithmetic operators
self.arith = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv, '^': operator.pow}
# initialize symbols for parsing
parameter = pp.oneOf(self.names)
point = pp.Literal(".")
e = pp.CaselessLiteral("E")
fnumber = pp.Combine(pp.Word("+-" + pp.nums, pp.nums) +
pp.Optional(point + pp.Optional(pp.Word(pp.nums))) +
pp.Optional(e + pp.Word("+-" + pp.nums, pp.nums)))
# initialize list of all numpy functions, remove functions that collide with (hyper-)parameter names
self.functions = dir(np) + dir(sp)
for name in self.names:
try:
self.functions.remove(name)
print('! WARNING: Function "{}" will not be available in parser, as it collides with '
'(hyper-)parameter names.'.format(name))
except ValueError:
pass
# initialize operators for parsing
funcop = pp.oneOf(self.functions)
atop = pp.Literal('@')
expop = pp.Literal('^')
signop = pp.oneOf('+ -')
multop = pp.oneOf('* /')
plusop = pp.oneOf('+ -')
# minimal symbol
atom = (parameter | fnumber)
# expression based on operator precedence
self.expr = pp.infixNotation(atom, [(funcop, 1, pp.opAssoc.RIGHT),
(atop, 2, pp.opAssoc.LEFT),
(expop, 2, pp.opAssoc.RIGHT),
(signop, 1, pp.opAssoc.RIGHT),
(multop, 2, pp.opAssoc.LEFT),
(plusop, 2, pp.opAssoc.LEFT)])
def _evaluate(self, parsedString):
"""
Recursive function to evaluate nested mathematical operations on (Hyper)Parameter instances.
Args:
parsedString(list): nested list generated from query by parser
Returns:
Derived Parameter instance
"""
# cases like "3*3*2" are split into "(3*3)*2"
if len(parsedString) > 3:
while len(parsedString) > 3:
if parsedString[0] in self.functions:
parsedString = [parsedString[:2]] + parsedString[2:]
else:
parsedString = [parsedString[:3]] + parsedString[3:]
result = []
for e in parsedString:
if isinstance(e, list):
# unary minus: "-4" --> "(-1)*4"
if len(e) == 2 and e[0] == '-':
e = ['-1', '*', e[1]]
# unary plus: "+4" --> "1*4"
elif len(e) == 2 and e[0] == '+':
e = ['1', '*', e[1]]
# numpy function
elif len(e) == 2 and isinstance(e[0], str):
e = [e[0], 'func', e[1]]
# recursion
result.append(self._evaluate(e))
else:
result.append(e)
result = self._operation(result[1], result[0], result[2])
return result
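    # Sketch of the left-fold above: pyparsing returns same-precedence chains as
    # one flat list, which _evaluate regroups before recursing:
    #   "3*3*2" parses to ['3', '*', '3', '*', '2']
    #   -> [['3', '*', '3'], '*', '2']                       (first while-loop pass)
    #   -> _operation('*', _evaluate(['3', '*', '3']), '2')  == 18.0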
def _convert(self, string):
"""
Converts string in query to either a Parameter instance, a Numpy function, a scipy.special function or
a float number.
Args:
string(str): string to convert
Returns:
Parameter instance, function or float
"""
if string in self.names:
param = [p for p in self.parameters if p.name == string][0]
return param.copy()
elif isinstance(string, str) and (string in dir(np)) and callable(getattr(np, string)):
return getattr(np, string)
elif isinstance(string, str) and (string in dir(sp)) and callable(getattr(sp, string)):
return getattr(sp, string)
else:
return float(string)
def _operation(self, symbol, a, b):
"""
Handles arithmetic operations and selection of time steps for (hyper-)parameters.
Args:
symbol(str): operator symbol (one of '+-*/^@' or 'func')
a: Parameter/HyperParameter instance, or number, or numpy function name
b: Parameter/HyperParameter instance, or number
Returns:
Derived Parameter/HyperParameter instance, or number
"""
if isinstance(a, str):
a = self._convert(a)
if isinstance(b, str):
b = self._convert(b)
# time operation
if symbol == '@':
if (type(a) == Parameter or (type(a) == HyperParameter and len(a.prob.shape) == 2)) and \
not (type(b) == Parameter or type(b) == HyperParameter):
timeIndex = list(a.study.formattedTimestamps).index(b)
a.prob = a.prob[timeIndex]
a.time = b
return a
# numpy function
if symbol == 'func':
return a(b)
# arithmetic operation
elif symbol in self.arith.keys():
# only perform arithmetic operations on parameters if timestamp is defined by "@" operator or
# global time "t=..."
if type(a) == Parameter and a.name != '_derived' and a.time is None:
raise ConfigurationError('No timestamp defined for parameter "{}"'.format(a.name))
if type(b) == Parameter and b.name != '_derived' and b.time is None:
raise ConfigurationError('No timestamp defined for parameter "{}"'.format(b.name))
# check if hyper-parameters from OnlineStudy instances have a defined time step
if type(a) == HyperParameter and len(a.prob.shape) == 2 and a.time is None:
raise ConfigurationError('No timestamp defined for hyper-parameter "{}"'.format(a.name))
if type(b) == HyperParameter and len(b.prob.shape) == 2 and b.time is None:
raise ConfigurationError('No timestamp defined for hyper-parameter "{}"'.format(b.name))
# compute compound distribution of two (hyper-)parameters
if (type(a) == Parameter and type(b) == Parameter and (not (a.study is b.study) or
(a.study is None and b.study is None) or
(a.name == b.name and not (a.time == b.time)))) or \
(type(a) == HyperParameter and type(b) == HyperParameter and (not (a.study is b.study) or
(a.study is None and b.study is None))) or \
((type(a) == HyperParameter) and (type(b) == Parameter) or
(type(b) == HyperParameter) and (type(a) == Parameter)):
valueTuples = np.array(np.meshgrid(a, b)).T.reshape(-1, 2)
values = self.arith[symbol](valueTuples[:, 0], valueTuples[:, 1])
prob = np.prod(np.array(np.meshgrid(a.prob, b.prob)).T.reshape(-1, 2), axis=1)
prob /= np.sum(prob)
return Parameter(values, prob, name='_derived') # derived objects are always "parameters"
# apply operator directly if compound distribution is not needed
else:
return self.arith[symbol](a, b)
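    # Sketch of the compound-distribution branch above, for two independent
    # parameters with hypothetical values:
    #   a = Parameter([1, 2], prob=[0.5, 0.5]), b = Parameter([10, 20], prob=[0.3, 0.7])
    #   meshgrid pairs:  (1, 10), (1, 20), (2, 10), (2, 20)
    #   '+' values:       11,      21,      12,      22
    #   product probs:    0.15,    0.35,    0.15,    0.35   (normalized; sums to 1)
    # The result is returned as a derived Parameter with name='_derived'.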
def __call__(self, query, t=None, silent=False):
self.parameters = []
# load parameter values, probabilities
if t is None:
for study in self.studies:
# check for OnlineStudy
storeHistory = -1
try:
storeHistory = study.storeHistory
except AttributeError:
pass
if storeHistory == -1 or storeHistory == 1:
names = study.observationModel.parameterNames
for i, name in enumerate(names):
index = study.observationModel.parameterNames.index(name)
self.parameters.append(Parameter(np.ravel(study.grid[index]),
np.array([np.ravel(post) for post in study.posteriorSequence]),
name=name,
study=study))
else:
names = study.observationModel.parameterNames
for i, name in enumerate(names):
index = study.observationModel.parameterNames.index(name)
self.parameters.append(Parameter(np.ravel(study.grid[index]),
np.ravel(study.marginalizedPosterior),
name=name,
time=study.formattedTimestamps[-1],
study=study))
else:
# compute index of timestamp
timeIndex = list(self.studies[0].formattedTimestamps).index(t)
for study in self.studies:
names = study.observationModel.parameterNames
for i, name in enumerate(names):
index = study.observationModel.parameterNames.index(name)
self.parameters.append(Parameter(np.ravel(study.grid[index]),
np.ravel(study.posteriorSequence[timeIndex]),
name=name,
time=t,
study=study))
# load hyper-parameter values, probabilities
for study in self.studies:
# check for OnlineStudy
try:
allNames = study.hyperParameterNames
# loop over different transition models
for j, names in enumerate(allNames):
# loop over hyper-parameters in transition model
for i, name in enumerate(names):
index = study._getHyperParameterIndex(study.transitionModels[j], name)
if t is None:
if study.storeHistory:
# extract sequence of only one hyper-parameter
hps = []
for x in study.hyperParameterSequence:
dist = x[j]/np.sum(x[j])
hps.append(dist)
hps = np.array(hps)
self.parameters.append(HyperParameter(study.hyperParameterValues[j][:, index],
hps,
name=name,
study=study))
else:
dist = study.hyperParameterDistribution[j]/np.sum(study.hyperParameterDistribution[j])
self.parameters.append(HyperParameter(study.hyperParameterValues[j][:, index],
dist,
name=name,
time=study.formattedTimestamps[-1],
study=study))
else:
if study.storeHistory:
# compute index of timestamp
timeIndex = list(self.studies[0].formattedTimestamps).index(t)
dist = study.hyperParameterSequence[timeIndex][j] / \
np.sum(study.hyperParameterSequence[timeIndex][j])
self.parameters.append(HyperParameter(study.hyperParameterValues[j][:, index],
dist,
name=name,
time=t,
study=study))
else:
raise ConfigurationError('OnlineStudy instance is not configured to store history, '
'cannot access t={}.'.format(t))
except AttributeError:
# check for Hyper/ChangepointStudy, i.e. whether study type supports hyper-parameter inference
try:
names = study.flatHyperParameterNames
for i, name in enumerate(names):
index = study._getHyperParameterIndex(study.transitionModel, name)
# probability values
normedDist = study.hyperParameterDistribution / np.sum(study.hyperParameterDistribution)
# hyper-parameter values
try:
values = study.allHyperGridValues # Changepoint-Study
except AttributeError:
values = study.hyperGridValues # Hyper-Study
self.parameters.append(HyperParameter(values[:, index],
normedDist,
name=name,
study=study))
except AttributeError:
# do not try to access hyper-parameters of basic Study class
continue
# reduce equation
splitQuery = re.split('>=|<=|==|>|<', query)
if len(splitQuery) == 1:
reducedQuery = query
elif len(splitQuery) == 2:
            # rewrite "lhs OP rhs" as "-1*(rhs)+lhs", i.e. "lhs - rhs OP 0"; negating and
            # prepending the right side avoids mis-parsing when the left side ends in an
            # arithmetic operation
            reducedQuery = '-1*('+splitQuery[1]+')+'+splitQuery[0]
else:
raise ConfigurationError('Use exactly one operator out of (<, >, <=, >=, ==) to obtain probability value, '
'or none to obtain derived distribution.')
# evaluate left side
parsedString = self.expr.parseString(reducedQuery).asList()[0]
derivedParameter = self._evaluate(parsedString)
# if no relational operator in query, compute derived distribution
if len(splitQuery) == 1:
derivedParameter[np.isinf(derivedParameter)] = np.nan
dmin = np.nanmin(derivedParameter)
dmax = np.nanmax(derivedParameter)
# bin size is chosen as maximal difference between two derived values
print(f"dmax {dmax}")
print(f"dmin {dmin}")
print(np.diff(np.sort(derivedParameter)))
nBins = int((dmax-dmin)/(np.nanmax(np.diff(np.sort(derivedParameter)))))
bins = np.linspace(dmin, dmax, nBins)
binnedValues = bins[:-1] + (bins[1]-bins[0])
binnedProbs = []
if not silent:
print('+ Computing distribution: {}'.format(query))
it = tqdm(zip(bins[:-1], bins[1:]), total=len(binnedValues))
else:
it = zip(bins[:-1], bins[1:])
for lower, upper in it:
binnedProbs.append(np.sum(derivedParameter.prob[(derivedParameter >= lower) * (derivedParameter < upper)]))
binnedProbs = np.array(binnedProbs)
return binnedValues, binnedProbs
# if relational operator in query, compute probability value
elif len(splitQuery) == 2:
# assign operator
if '>=' in query:
op = operator.ge
elif '>' in query:
op = operator.gt
elif '<=' in query:
op = operator.le
elif '<' in query:
op = operator.lt
elif '==' in query:
op = operator.eq
# compute probability
mask = op(derivedParameter, 0.)
p = np.sum(derivedParameter.prob[mask])
if not silent:
print('P({}) = {}'.format(query, p))
return p
else:
raise ConfigurationError('More than one relational operator found in query.')
|
|
from operator import methodcaller
from typing import (
Callable,
Dict,
List,
Type,
TypeVar,
Union,
Optional,
)
from configargparse import Namespace
from .event import Events
from .exception import RunnerAlreadyExistsError
from .stats import RequestStats
from .runners import Runner, LocalRunner, MasterRunner, WorkerRunner
from .web import WebUI
from .user import User
from .user.task import TaskSet, filter_tasks_by_tags
from .shape import LoadTestShape
RunnerType = TypeVar("RunnerType", bound=Runner)
class Environment:
def __init__(
self,
*,
user_classes: Union[List[Type[User]], None] = None,
shape_class: Union[LoadTestShape, None] = None,
tags: Union[List[str], None] = None,
locustfile: str = None,
exclude_tags=None,
events: Events = None,
host: str = None,
reset_stats=False,
stop_timeout: Union[float, None] = None,
catch_exceptions=True,
parsed_options: Namespace = None,
):
self.runner: Optional[Runner] = None
"""Reference to the :class:`Runner <locust.runners.Runner>` instance"""
self.web_ui: Optional[WebUI] = None
"""Reference to the WebUI instance"""
self.process_exit_code: Optional[int] = None
"""
If set it'll be the exit code of the Locust process
"""
if events:
self.events = events
"""
Event hooks used by Locust internally, as well as to extend Locust's functionality
See :ref:`events` for available events.
"""
else:
self.events = Events()
self.locustfile = locustfile
"""Filename (not path) of locustfile"""
self.user_classes: List[Type[User]] = user_classes or []
"""User classes that the runner will run"""
self.shape_class = shape_class
"""A shape class to control the shape of the load test"""
self.tags = tags
"""If set, only tasks that are tagged by tags in this list will be executed. Leave this as None to use the one from parsed_options"""
self.exclude_tags = exclude_tags
"""If set, only tasks that aren't tagged by tags in this list will be executed. Leave this as None to use the one from parsed_options"""
self.stats = RequestStats()
"""Reference to RequestStats instance"""
self.host = host
"""Base URL of the target system"""
self.reset_stats = reset_stats
"""Determines if stats should be reset once all simulated users have been spawned"""
self.stop_timeout = stop_timeout
"""
If set, the runner will try to stop the running users gracefully and wait this many seconds
before killing them hard.
"""
self.catch_exceptions = catch_exceptions
"""
        If True, exceptions that happen within running users will be caught (and reported in UI/console).
If False, exceptions will be raised.
"""
self.parsed_options = parsed_options
"""Reference to the parsed command line options (used to pre-populate fields in Web UI). May be None when using Locust as a library"""
self._remove_user_classes_with_weight_zero()
# Validate there's no class with the same name but in different modules
if len({user_class.__name__ for user_class in self.user_classes}) != len(self.user_classes):
raise ValueError(
"The following user classes have the same class name: {}".format(
", ".join(map(methodcaller("fullname"), self.user_classes))
)
)
if self.shape_class is not None and not isinstance(self.shape_class, LoadTestShape):
raise ValueError(
"shape_class should be instance of LoadTestShape or subclass LoadTestShape, but got: %s"
% self.shape_class
)
def _create_runner(
self,
runner_class: Type[RunnerType],
*args,
**kwargs,
) -> RunnerType:
if self.runner is not None:
raise RunnerAlreadyExistsError(f"Environment.runner already exists ({self.runner})")
self.runner = runner_class(self, *args, **kwargs)
# Attach the runner to the shape class so that the shape class can access user count state
if self.shape_class:
self.shape_class.runner = self.runner
return self.runner
def create_local_runner(self) -> LocalRunner:
"""
Create a :class:`LocalRunner <locust.runners.LocalRunner>` instance for this Environment
"""
return self._create_runner(LocalRunner)
def create_master_runner(self, master_bind_host="*", master_bind_port=5557) -> MasterRunner:
"""
Create a :class:`MasterRunner <locust.runners.MasterRunner>` instance for this Environment
:param master_bind_host: Interface/host that the master should use for incoming worker connections.
Defaults to "*" which means all interfaces.
:param master_bind_port: Port that the master should listen for incoming worker connections on
"""
return self._create_runner(
MasterRunner,
master_bind_host=master_bind_host,
master_bind_port=master_bind_port,
)
def create_worker_runner(self, master_host, master_port) -> WorkerRunner:
"""
Create a :class:`WorkerRunner <locust.runners.WorkerRunner>` instance for this Environment
:param master_host: Host/IP of a running master node
:param master_port: Port on master node to connect to
"""
# Create a new RequestStats with use_response_times_cache set to False to save some memory
# and CPU cycles, since the response_times_cache is not needed for Worker nodes
self.stats = RequestStats(use_response_times_cache=False)
return self._create_runner(
WorkerRunner,
master_host=master_host,
master_port=master_port,
)
def create_web_ui(
self,
host="",
port=8089,
auth_credentials=None,
tls_cert=None,
tls_key=None,
stats_csv_writer=None,
delayed_start=False,
):
"""
        Creates a :class:`WebUI <locust.web.WebUI>` instance for this Environment and starts running the web server
:param host: Host/interface that the web server should accept connections to. Defaults to ""
which means all interfaces
:param port: Port that the web server should listen to
:param auth_credentials: If provided (in format "username:password") basic auth will be enabled
:param tls_cert: An optional path (str) to a TLS cert. If this is provided the web UI will be
served over HTTPS
:param tls_key: An optional path (str) to a TLS private key. If this is provided the web UI will be
served over HTTPS
:param stats_csv_writer: `StatsCSV <stats_csv.StatsCSV>` instance.
:param delayed_start: Whether or not to delay starting web UI until `start()` is called. Delaying web UI start
allows for adding Flask routes or Blueprints before accepting requests, avoiding errors.
"""
self.web_ui = WebUI(
self,
host,
port,
auth_credentials=auth_credentials,
tls_cert=tls_cert,
tls_key=tls_key,
stats_csv_writer=stats_csv_writer,
delayed_start=delayed_start,
)
return self.web_ui
def _filter_tasks_by_tags(self):
"""
Filter the tasks on all the user_classes recursively, according to the tags and
exclude_tags attributes
"""
if getattr(self, "_tasks_filtered", False):
return # only filter once
self._tasks_filtered = True
if self.tags is not None:
tags = set(self.tags)
elif self.parsed_options and self.parsed_options.tags:
tags = set(self.parsed_options.tags)
else:
tags = None
if self.exclude_tags is not None:
exclude_tags = set(self.exclude_tags)
elif self.parsed_options and self.parsed_options.exclude_tags:
exclude_tags = set(self.parsed_options.exclude_tags)
else:
exclude_tags = None
for user_class in self.user_classes:
filter_tasks_by_tags(user_class, tags, exclude_tags)
def _remove_user_classes_with_weight_zero(self):
"""
Remove user classes having a weight of zero.
"""
if len(self.user_classes) == 0:
# Preserve previous behaviour that allowed no user classes to be specified.
return
filtered_user_classes = [
user_class for user_class in self.user_classes if user_class.weight > 0 or user_class.fixed_count > 0
]
if len(filtered_user_classes) == 0:
# TODO: Better exception than `ValueError`?
raise ValueError("There are no users with weight > 0.")
self.user_classes[:] = filtered_user_classes
def assign_equal_weights(self):
"""
Update the user classes such that each user runs their specified tasks with equal
probability.
"""
for u in self.user_classes:
u.weight = 1
user_tasks = []
tasks_frontier = u.tasks
while len(tasks_frontier) != 0:
t = tasks_frontier.pop()
if hasattr(t, "tasks") and t.tasks:
tasks_frontier.extend(t.tasks)
elif isinstance(t, Callable):
if t not in user_tasks:
user_tasks.append(t)
else:
raise ValueError("Unrecognized task type in user")
u.tasks = user_tasks
@property
def user_classes_by_name(self) -> Dict[str, Type[User]]:
return {u.__name__: u for u in self.user_classes}
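# A minimal library-usage sketch (hypothetical user class; see the Locust docs
# for the full pattern, including gevent/greenlet setup):
#
#   env = Environment(user_classes=[MyUser], host="http://localhost:8080")
#   runner = env.create_local_runner()
#   runner.start(10, spawn_rate=2)   # simulate 10 users, spawned 2 per second
#   ...
#   runner.quit()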
|
|
import numpy
import fractions
import math
import sympy
from functools import reduce
def rotate(l, n):
return l[n:] + l[:n]
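# Example: rotate([1, 2, 3, 4], 1) == [2, 3, 4, 1]; negative n rotates the other way.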
class Attractor:
# TODO: Use a general class for attractors (everywhere)
def __init__(self, states):
largest_ind = max(list(range(len(states))), key=lambda t: order_key_func(states[t]))
self.states = tuple(tuple(states[(t + largest_ind + 1) % len(states)]) for t in range(len(states)))
def __eq__(self, other):
"""
Compares attractors invariant to rotation (by rotating both to have largest state last)
Note that the container types used for attractor states are important (e.g. [0,0,0] != (0,0,0))
:param self:
:param other:
:return:
"""
if not isinstance(other, Attractor):
raise NotImplementedError("Can't compare an attractor to anything else")
if len(self.states) != len(other.states):
return False
return self.states == other.states
def __ne__(self, other):
return not self == other
def __hash__(self):
        return hash(self.states)  # self.states is already a tuple of tuples
def __str__(self):
return "Attractor, states: {}".format(self.states)
def __repr__(self):
return str(self)
def order_key_func(node_states): return sum(node * 2 ** i for (i, node) in enumerate(node_states))
def list_repr(elements):
if len(elements) == 0:
return "[]"
else:
return "[" + str(reduce(lambda x, y: str(x) + ", " + str(y), elements)) + "]"
def slice_int(x, k, d):
"""
    Slices an integer in the domain [0, 2**d - 1] into k-bit parts, and returns the
    respective parts (each in the range [0, 2**k - 1]), MSB to LSB.
    :param x: A positive integer
    :param k: An integer representing the size of the slices, in bits
    :param d: The bit width of x. If x < 2**d - 1, it is treated as left-padded with zeros.
:return:
"""
return [(x >> i*k) % 2**k for i in range(int(math.ceil(float(d)/k)))][::-1]
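# Example: slice_int(45, 2, 6) == [2, 3, 1], since 45 == 0b101101 and the
# 2-bit slices, MSB to LSB, are 0b10, 0b11, 0b01.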
def divisors(x):
"""
    Divisor list for x, copy-pasted (and corrected for x == 1) from Stack Overflow
:param x:
:return:
"""
if x == 1:
return [x]
div_list = []
y = 1
while y <= int(math.sqrt(x)):
if x % y == 0:
div_list.append(y)
if x / y != y:
div_list.append(int(x / y))
y += 1
return div_list
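# Example: divisors(12) == [1, 12, 2, 6, 3, 4] (complementary pairs, not sorted).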
def phi(n):
"""
    Euler's totient function, copy-pasted from Stack Overflow
:param n:
:return:
"""
amount = 0
for k in range(1, n + 1): # n+1 important for phi(1)=1
        if math.gcd(n, k) == 1:  # fractions.gcd was removed in Python 3.9
amount += 1
return amount
def binary_necklaces(n):
"""
returns the number of binary necklaces over n bits, i.e. number of binary strings up to rotation.
derivation here - www.quora.com/How-many-unique-binary-matrices-are-there-up-to-rotations-translations-and-flips
also here - https://en.wikipedia.org/wiki/Necklace_(combinatorics)#Number_of_necklaces
:param n:
:return:
"""
    s = 0
    for divisor in divisors(n):
        # integer division keeps phi()'s argument and the result exact ints
        s += phi(n // divisor) * (2**divisor)
    return s // n
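# Example: binary_necklaces(4) == 6, via sum over divisors {1, 2, 4}:
# (phi(4)*2**1 + phi(2)*2**2 + phi(1)*2**4) // 4 == (4 + 4 + 16) // 4 == 6.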
def attractor_sets_equality(first_attractors, second_attractors):
"""
Compares two containers of attractors, invariant to rotations.
For efficiency (hopefully), creates a set-like structure for comparisons.
    :param first_attractors:
    :param second_attractors:
:return:
"""
if len(first_attractors) != len(second_attractors):
return False
first_attractors_set = set(Attractor(att) for att in first_attractors)
second_attractors_set = set(Attractor(att) for att in second_attractors)
return first_attractors_set == second_attractors_set
def is_attractor_in_attractor_list(attractor, attractor_list):
first_attractor = Attractor(attractor)
second_attractors_set = set(Attractor(att) for att in attractor_list)
return first_attractor in second_attractors_set
def attractor_lists_intersection_size(first_list, second_list):
# TODO: write tests? Seems straightforward.
first_attractors_set = set(Attractor(att) for att in first_list)
second_attractors_set = set(Attractor(att) for att in second_list)
return len(first_attractors_set.intersection(second_attractors_set))
def is_same_attractor(a1, a2):
"""
:param a1: an attractor, represented as an ordered iterable of network states
:param a2: ""
:return: True iff the attractors have same states in same order, up to a shift.
"""
if len(a1) != len(a2):
return False
a1 = tuple(tuple(1 if v_state else 0 for v_state in s) for s in a1)
a2 = tuple(tuple(1 if v_state else 0 for v_state in s) for s in a2)
for shift in range(len(a1)):
if a1 == rotate(a2, shift):
return True
return False
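# Example: is_same_attractor([(0, 1), (1, 0)], [(1, 0), (0, 1)]) == True,
# since the second cycle is the first one shifted by one step.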
def is_same_state(s1, s2):
"""
:param s1: A network state, represented as an ordered iterable of values interpretable as boolean.
:param s2: ""
:return: True if s1 and s2 represent the same state
"""
if len(s1) != len(s2):
# return False
raise ValueError("Can't compare states from models of different size.")
for v_state in s1:
assert v_state in [0, 1, False, True, sympy.false, sympy.true], "illegal state for a vertex: {}".format(v_state)
for v_state in s2:
        assert v_state in [0, 1, False, True, sympy.false, sympy.true], "illegal state for a vertex: {}".format(v_state)
s1_standard = tuple(1 if v_state else 0 for v_state in s1)
s2_standard = tuple(1 if v_state else 0 for v_state in s2)
return s1_standard == s2_standard
def is_attractor_valid(attractor, G):
"""
Checks whether an attractor is valid in the model G, regardless of representation.
:param attractor:
:param G:
:return:
"""
# TODO: write tests!
for state, next_state in zip(attractor, rotate(attractor, 1)):
if not is_same_state(G.next_state(state), next_state):
return False
return True
def choose_k_bits_from_vertex_functions(degrees, k):
"""
Given a list of vertex degrees and an integer k, choose k different bits to change in the functions of vertices.
The choice is uniform over all possible k choices of lines in the collection of truth tables of nodes.
Does not choose value for input nodes (degree 0)
Returns a dictionary, where keys are node indices and values are indices of lists of lines in their truth tables.
:param degrees:
:param k:
:return:
"""
# TODO: write tests (I experimented by hand)
cur_lines = 0
cumulative_n_lines = []
for degree in degrees:
cur_lines += (2 ** degree) if degree != 0 else 0
cumulative_n_lines.append(cur_lines)
indices = numpy.random.choice(range(cumulative_n_lines[-1]), replace=False, size=k)
choices = dict()
for index in indices:
for degree_index in range(len(cumulative_n_lines)):
if index < cumulative_n_lines[degree_index]:
choices[degree_index] = choices.get(degree_index, []) + [
index - (cumulative_n_lines[degree_index - 1] if (degree_index > 0) else 0)]
break
return choices
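# Sketch of the index mapping above, for degrees=[0, 1, 2] (node 0 is an input
# node and contributes no lines; cumulative_n_lines == [0, 2, 6]):
#   a drawn index of 3 falls below cumulative_n_lines[2], so it maps to
#   node 2, truth-table line 3 - 2 == 1.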
def parse_ge_file(path):
"""
Given the path of a GE readout file and a corresponding model, returns a list of partial model state such that
each partial state is a dict mapping measured node names to their Boolean value.
File is formatted with first line as the header #activated ligands/inhibited proteins | readouts,
then each line has (in regex syntax) (NODE_NAME BINARY_VALUE)* | | (NODE_NAME BINARY_VALUE)* # #,
the first half corresponding to input nodes.
:param path:
:return:
"""
with open(path, "r") as ge_file:
lines = ge_file.readlines()[1:]
readouts = []
for line in lines:
if line == "\n":
continue # probably last line
clean_line = line.replace("|", "").replace("#", "") # we don't distinguish input nodes
tokens = clean_line.split()
        values_dict = {tokens[2 * i]: bool(int(tokens[2 * i + 1])) for i in range(len(tokens) // 2)}  # //: len(tokens)/2 is a float on Python 3
readouts.append(values_dict)
return readouts
|
|
import weakref, sys
from rpython.rlib.rstrategies import logger
from rpython.rlib import jit, objectmodel, rerased
from rpython.rlib.objectmodel import specialize
def make_accessors(strategy='strategy', storage='storage'):
"""
Instead of using this generator, the methods can be implemented manually.
A third way is to overwrite the getter/setter methods in StrategyFactory.
"""
def make_getter(attr):
def getter(self): return getattr(self, attr)
return getter
def make_setter(attr):
def setter(self, val): setattr(self, attr, val)
return setter
classdef = sys._getframe(1).f_locals
classdef['_get_strategy'] = make_getter(strategy)
classdef['_set_strategy'] = make_setter(strategy)
classdef['_get_storage'] = make_getter(storage)
classdef['_set_storage'] = make_setter(storage)
class StrategyMetaclass(type):
"""
A metaclass is required, because we need certain attributes to be special
for every single strategy class.
"""
def __new__(self, name, bases, attrs):
attrs['_is_strategy'] = False
attrs['_is_singleton'] = False
attrs['_specializations'] = []
# Not every strategy uses rerased-pairs, but they won't hurt
erase, unerase = rerased.new_erasing_pair(name)
def get_storage(self, w_self):
erased = self.strategy_factory().get_storage(w_self)
return unerase(erased)
def set_storage(self, w_self, storage):
erased = erase(storage)
self.strategy_factory().set_storage(w_self, erased)
attrs['get_storage'] = get_storage
attrs['set_storage'] = set_storage
return type.__new__(self, name, bases, attrs)
def strategy(generalize=None, singleton=True):
"""
Strategy classes must be decorated with this.
    generalize is a list of other strategies that can be switched to from the decorated strategy.
If the singleton flag is set to False, new strategy instances will be created,
instead of always reusing the singleton object.
"""
def decorator(strategy_class):
# Patch strategy class: Add generalized_strategy_for and mark as strategy class.
if generalize:
@jit.unroll_safe
def generalized_strategy_for(self, value):
# TODO - optimize this method
for strategy in generalize:
if self.strategy_factory().strategy_singleton_instance(strategy)._check_can_handle(value):
return strategy
raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self))
strategy_class.generalized_strategy_for = generalized_strategy_for
for generalized in generalize:
generalized._specializations.append(strategy_class)
strategy_class._is_strategy = True
strategy_class._generalizations = generalize
strategy_class._is_singleton = singleton
return strategy_class
return decorator
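# A minimal decoration sketch (hypothetical strategy classes; the generalize
# list holds class objects, so the more general strategies must be defined first):
#
#   @strategy()   # most general strategy: nothing to generalize to
#   class GenericListStrategy(GenericStrategy):
#       ...
#
#   @strategy(generalize=[GenericListStrategy])
#   class IntegerListStrategy(SingleTypeStrategy):
#       contained_type = int
#       ...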
class StrategyFactory(object):
_immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"]
factory_instance_counter = 0
def __init__(self, root_class, all_strategy_classes=None):
if all_strategy_classes is None:
all_strategy_classes = self._collect_subclasses(root_class)
self.strategies = []
self.logger = logger.Logger()
# This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests)
self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter
StrategyFactory.factory_instance_counter += 1
self._create_strategy_instances(root_class, all_strategy_classes)
def _create_strategy_instances(self, root_class, all_strategy_classes):
for strategy_class in all_strategy_classes:
if strategy_class._is_strategy:
setattr(strategy_class, self.strategy_singleton_field, self.instantiate_strategy(strategy_class))
self.strategies.append(strategy_class)
self._patch_strategy_class(strategy_class, root_class)
self._order_strategies()
# =============================
# API methods
# =============================
def switch_strategy(self, w_self, new_strategy_type, new_element=None):
"""
Switch the strategy of w_self to the new type.
        new_element can be given as a hint, purely for logging purposes.
It should be the object that was added to w_self, causing the strategy switch.
"""
old_strategy = self.get_strategy(w_self)
if new_strategy_type._is_singleton:
new_strategy = self.strategy_singleton_instance(new_strategy_type)
else:
size = old_strategy.size(w_self)
new_strategy = self.instantiate_strategy(new_strategy_type, w_self, size)
self.set_strategy(w_self, new_strategy)
old_strategy._convert_storage_to(w_self, new_strategy)
new_strategy.strategy_switched(w_self)
self.log(w_self, new_strategy, old_strategy, new_element)
return new_strategy
def set_initial_strategy(self, w_self, strategy_type, size, elements=None):
"""
Initialize the strategy and storage fields of w_self.
This must be called before switch_strategy or any strategy method can be used.
elements is an optional list of values initially stored in w_self.
If given, then len(elements) == size must hold.
"""
assert self.get_strategy(w_self) is None, "Strategy should not be initialized yet!"
if strategy_type._is_singleton:
strategy = self.strategy_singleton_instance(strategy_type)
else:
strategy = self.instantiate_strategy(strategy_type, w_self, size)
self.set_strategy(w_self, strategy)
strategy._initialize_storage(w_self, size)
element = None
if elements:
strategy.store_all(w_self, elements)
if len(elements) > 0: element = elements[0]
strategy.strategy_switched(w_self)
self.log(w_self, strategy, None, element)
return strategy
@jit.unroll_safe
def strategy_type_for(self, objects):
"""
Return the best-fitting strategy to hold all given objects.
"""
specialized_strategies = len(self.strategies)
can_handle = [True] * specialized_strategies
for obj in objects:
if specialized_strategies <= 1:
break
for i, strategy in enumerate(self.strategies):
if can_handle[i] and not self.strategy_singleton_instance(strategy)._check_can_handle(obj):
can_handle[i] = False
specialized_strategies -= 1
for i, strategy_type in enumerate(self.strategies):
if can_handle[i]:
return strategy_type
raise ValueError("Could not find strategy to handle: %s" % objects)
def decorate_strategies(self, transitions):
"""
As an alternative to decorating all strategies with @strategy,
invoke this in the constructor of your StrategyFactory subclass, before
calling __init__. transitions is a dict mapping all strategy classes to
their 'generalize' list parameter (see @strategy decorator).
"""
"NOT_RPYTHON"
for strategy_class, generalized in transitions.items():
strategy(generalized)(strategy_class)
# =============================
# The following methods can be overwritten to customize certain aspects of the factory.
# =============================
def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0):
"""
Return a functional instance of strategy_type.
Overwrite this if you need a non-default constructor.
The two additional parameters should be ignored for singleton-strategies.
"""
return strategy_type()
def log(self, w_self, new_strategy, old_strategy=None, new_element=None):
"""
This can be overwritten into a more appropriate call to self.logger.log
"""
if not self.logger.active: return
new_strategy_str = self.log_string_for_object(new_strategy)
old_strategy_str = self.log_string_for_object(old_strategy)
element_typename = self.log_string_for_object(new_element)
size = new_strategy.size(w_self)
typename = ""
cause = "Switched" if old_strategy else "Created"
self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename)
@specialize.call_location()
def log_string_for_object(self, obj):
"""
This can be overwritten instead of the entire log() method.
Keep the specialize-annotation in order to handle different kinds of objects here.
"""
return obj.__class__.__name__ if obj else ""
# These storage accessors are specialized because the storage field is
# populated by erased-objects which seem to be incompatible sometimes.
@specialize.call_location()
def get_storage(self, obj):
return obj._get_storage()
@specialize.call_location()
def set_storage(self, obj, val):
return obj._set_storage(val)
def get_strategy(self, obj):
return obj._get_strategy()
def set_strategy(self, obj, val):
return obj._set_strategy(val)
# =============================
# Internal methods
# =============================
def _patch_strategy_class(self, strategy_class, root_class):
"NOT_RPYTHON"
# Patch root class: Add default handler for visitor
def _convert_storage_from_OTHER(self, w_self, previous_strategy):
self._convert_storage_from(w_self, previous_strategy)
funcname = "_convert_storage_from_" + strategy_class.__name__
_convert_storage_from_OTHER.func_name = funcname
setattr(root_class, funcname, _convert_storage_from_OTHER)
# Patch strategy class: Add polymorphic visitor function
def _convert_storage_to(self, w_self, new_strategy):
getattr(new_strategy, funcname)(w_self, self)
strategy_class._convert_storage_to = _convert_storage_to
def _collect_subclasses(self, cls):
"NOT_RPYTHON"
subclasses = []
for subcls in cls.__subclasses__():
subclasses.append(subcls)
subclasses.extend(self._collect_subclasses(subcls))
return subclasses
def _order_strategies(self):
"NOT_RPYTHON"
def get_generalization_depth(strategy, visited=None):
if visited is None:
visited = set()
if strategy._generalizations:
if strategy in visited:
raise Exception("Cycle in generalization-tree of %s" % strategy)
visited.add(strategy)
depth = 0
for generalization in strategy._generalizations:
other_depth = get_generalization_depth(generalization, set(visited))
depth = max(depth, other_depth)
return depth + 1
else:
return 0
self.strategies.sort(key=get_generalization_depth, reverse=True)
@jit.elidable
def strategy_singleton_instance(self, strategy_class):
return getattr(strategy_class, self.strategy_singleton_field)
def _freeze_(self):
# Instance will be frozen at compile time, making accesses constant.
# The constructor does meta stuff which is not possible after translation.
return True
class AbstractStrategy(object):
"""
== Required:
strategy_factory(self) - Access to StorageFactory
"""
def strategy_switched(self, w_self):
# Overwrite this method for a hook whenever the strategy
# of w_self was switched to self.
pass
# Main Fixedsize API
def store(self, w_self, index0, value):
raise NotImplementedError("Abstract method")
def fetch(self, w_self, index0):
raise NotImplementedError("Abstract method")
def size(self, w_self):
raise NotImplementedError("Abstract method")
# Fixedsize utility methods
def slice(self, w_self, start, end):
return [ self.fetch(w_self, i) for i in range(start, end)]
def fetch_all(self, w_self):
return self.slice(w_self, 0, self.size(w_self))
def store_all(self, w_self, elements):
for i, e in enumerate(elements):
self.store(w_self, i, e)
# Main Varsize API
def insert(self, w_self, index0, list_w):
raise NotImplementedError("Abstract method")
def delete(self, w_self, start, end):
raise NotImplementedError("Abstract method")
# Varsize utility methods
def append(self, w_self, list_w):
self.insert(w_self, self.size(w_self), list_w)
def pop(self, w_self, index0):
e = self.fetch(w_self, index0)
self.delete(w_self, index0, index0+1)
return e
# Internal methods
def _initialize_storage(self, w_self, initial_size):
raise NotImplementedError("Abstract method")
def _check_can_handle(self, value):
raise NotImplementedError("Abstract method")
def _convert_storage_to(self, w_self, new_strategy):
# This will be overwritten in _patch_strategy_class
new_strategy._convert_storage_from(w_self, self)
@jit.unroll_safe
def _convert_storage_from(self, w_self, previous_strategy):
        # This is a very inefficient (but most generic) way to do this.
# Subclasses should specialize.
storage = previous_strategy.fetch_all(w_self)
self._initialize_storage(w_self, previous_strategy.size(w_self))
for i, field in enumerate(storage):
self.store(w_self, i, field)
def _generalize_for_value(self, w_self, value):
strategy_type = self.generalized_strategy_for(value)
new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value)
return new_instance
def _cannot_handle_store(self, w_self, index0, value):
new_instance = self._generalize_for_value(w_self, value)
new_instance.store(w_self, index0, value)
def _cannot_handle_insert(self, w_self, index0, list_w):
# TODO - optimize. Prevent multiple generalizations and slicing done by callers.
new_strategy = self._generalize_for_value(w_self, list_w[0])
new_strategy.insert(w_self, index0, list_w)
# ============== Special Strategies with no storage array ==============
class EmptyStrategy(AbstractStrategy):
# == Required:
# See AbstractStrategy
def _initialize_storage(self, w_self, initial_size):
assert initial_size == 0
self.set_storage(w_self, None)
def _convert_storage_from(self, w_self, previous_strategy):
self.set_storage(w_self, None)
def _check_can_handle(self, value):
return False
def fetch(self, w_self, index0):
raise IndexError
def store(self, w_self, index0, value):
        self._cannot_handle_store(w_self, index0, value)  # pass the bare value, not a list
def insert(self, w_self, index0, list_w):
self._cannot_handle_insert(w_self, index0, list_w)
def delete(self, w_self, start, end):
self.check_index_range(w_self, start, end)
def size(self, w_self):
return 0
class SingleValueStrategyStorage(object):
"""Small container object for a size value."""
_attrs_ = ['size']
def __init__(self, size=0):
self.size = size
class SingleValueStrategy(AbstractStrategy):
# == Required:
# See AbstractStrategy
# check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin
# value(self) - the single value contained in this strategy. Should be constant.
def _initialize_storage(self, w_self, initial_size):
storage_obj = SingleValueStrategyStorage(initial_size)
self.set_storage(w_self, storage_obj)
def _convert_storage_from(self, w_self, previous_strategy):
self._initialize_storage(w_self, previous_strategy.size(w_self))
def _check_can_handle(self, value):
return value is self.value()
def fetch(self, w_self, index0):
self.check_index_fetch(w_self, index0)
return self.value()
def store(self, w_self, index0, value):
self.check_index_store(w_self, index0)
if self._check_can_handle(value):
return
self._cannot_handle_store(w_self, index0, value)
def delete(self, w_self, start, end):
self.check_index_range(w_self, start, end)
self.get_storage(w_self).size -= (end - start)
def size(self, w_self):
return self.get_storage(w_self).size
@jit.unroll_safe
def insert(self, w_self, index0, list_w):
storage_obj = self.get_storage(w_self)
for i in range(len(list_w)):
if self._check_can_handle(list_w[i]):
storage_obj.size += 1
else:
self._cannot_handle_insert(w_self, index0 + i, list_w[i:])
return
# ============== Basic strategies with storage ==============
class StrategyWithStorage(AbstractStrategy):
# == Required:
# See AbstractStrategy
# check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin
# default_value(self) - The value to be initially contained in this strategy
def _initialize_storage(self, w_self, initial_size):
default = self._unwrap(self.default_value())
self.set_storage(w_self, [default] * initial_size)
@jit.unroll_safe
def _convert_storage_from(self, w_self, previous_strategy):
size = previous_strategy.size(w_self)
new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i))
for i in range(size) ]
self.set_storage(w_self, new_storage)
def store(self, w_self, index0, wrapped_value):
self.check_index_store(w_self, index0)
if self._check_can_handle(wrapped_value):
unwrapped = self._unwrap(wrapped_value)
self.get_storage(w_self)[index0] = unwrapped
else:
self._cannot_handle_store(w_self, index0, wrapped_value)
def fetch(self, w_self, index0):
self.check_index_fetch(w_self, index0)
unwrapped = self.get_storage(w_self)[index0]
return self._wrap(unwrapped)
def _wrap(self, value):
raise NotImplementedError("Abstract method")
def _unwrap(self, value):
raise NotImplementedError("Abstract method")
def size(self, w_self):
return len(self.get_storage(w_self))
@jit.unroll_safe
def insert(self, w_self, start, list_w):
        # This follows Python's list.insert behaviour - an index larger than
        # the size is clamped, so the insert happens at the end of the array
if start > self.size(w_self):
start = self.size(w_self)
for i in range(len(list_w)):
if self._check_can_handle(list_w[i]):
self.get_storage(w_self).insert(start + i, self._unwrap(list_w[i]))
else:
self._cannot_handle_insert(w_self, start + i, list_w[i:])
return
def delete(self, w_self, start, end):
self.check_index_range(w_self, start, end)
assert start >= 0 and end >= 0
del self.get_storage(w_self)[start : end]
class GenericStrategy(StrategyWithStorage):
# == Required:
# See StrategyWithStorage
def _wrap(self, value):
return value
def _unwrap(self, value):
return value
def _check_can_handle(self, wrapped_value):
return True
class WeakGenericStrategy(StrategyWithStorage):
# == Required:
# See StrategyWithStorage
def _wrap(self, value):
return value() or self.default_value()
def _unwrap(self, value):
assert value is not None
return weakref.ref(value)
def _check_can_handle(self, wrapped_value):
return True
# ============== Mixins for index checking operations ==============
class SafeIndexingMixin(object):
def check_index_store(self, w_self, index0):
self.check_index(w_self, index0)
def check_index_fetch(self, w_self, index0):
self.check_index(w_self, index0)
def check_index_range(self, w_self, start, end):
if end < start:
raise IndexError
self.check_index(w_self, start)
self.check_index(w_self, end)
def check_index(self, w_self, index0):
if index0 < 0 or index0 >= self.size(w_self):
raise IndexError
class UnsafeIndexingMixin(object):
def check_index_store(self, w_self, index0):
pass
def check_index_fetch(self, w_self, index0):
pass
def check_index_range(self, w_self, start, end):
pass
# ============== Specialized Storage Strategies ==============
class SpecializedStrategy(StrategyWithStorage):
# == Required:
# See StrategyWithStorage
# wrap(self, value) - Return a boxed object for the primitive value
# unwrap(self, value) - Return the unboxed primitive value of value
def _unwrap(self, value):
return self.unwrap(value)
def _wrap(self, value):
return self.wrap(value)
class SingleTypeStrategy(SpecializedStrategy):
# == Required Functions:
# See SpecializedStrategy
# contained_type - The wrapped type that can be stored in this strategy
def _check_can_handle(self, value):
return isinstance(value, self.contained_type)
class TaggingStrategy(SingleTypeStrategy):
"""This strategy uses a special tag value to represent a single additional object."""
# == Required:
# See SingleTypeStrategy
# wrapped_tagged_value(self) - The tagged object
# unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object
def _check_can_handle(self, value):
return value is self.wrapped_tagged_value() or \
(isinstance(value, self.contained_type) and \
self.unwrap(value) != self.unwrapped_tagged_value())
def _unwrap(self, value):
if value is self.wrapped_tagged_value():
return self.unwrapped_tagged_value()
return self.unwrap(value)
def _wrap(self, value):
if value == self.unwrapped_tagged_value():
return self.wrapped_tagged_value()
return self.wrap(value)
|
|
import copy
import logging
import os
import requests
import smtplib
import socket
import subprocess
import sys
from email.mime.text import MIMEText
import teuthology.lock.query
import teuthology.lock.util
from teuthology import repo_utils
from teuthology.config import config
from teuthology.exceptions import BranchNotFoundError, ScheduleFailError
from teuthology.misc import deep_merge
from teuthology.repo_utils import fetch_qa_suite, fetch_teuthology
from teuthology.orchestra.opsys import OS
from teuthology.packaging import get_builder_project
from teuthology.repo_utils import build_git_url
from teuthology.suite.build_matrix import combine_path
from teuthology.task.install import get_flavor
log = logging.getLogger(__name__)
CONTAINER_DISTRO = 'centos/8' # the one to check for build_complete
CONTAINER_FLAVOR = 'basic' # basic maps to default on shaman
def fetch_repos(branch, test_name):
"""
Fetch the suite repo (and also the teuthology repo) so that we can use it
to build jobs. Repos are stored in ~/src/.
The reason the teuthology repo is also fetched is that currently we use
subprocess to call teuthology-schedule to schedule jobs so we need to make
sure it is up-to-date. For that reason we always fetch the master branch
for test scheduling, regardless of what teuthology branch is requested for
testing.
:returns: The path to the suite repo on disk
"""
try:
# When a user is scheduling a test run from their own copy of
# teuthology, let's not wreak havoc on it.
if config.automated_scheduling:
# We use teuthology's master branch in all cases right now
if config.teuthology_path is None:
fetch_teuthology('master')
suite_repo_path = fetch_qa_suite(branch)
except BranchNotFoundError as exc:
schedule_fail(message=str(exc), name=test_name)
return suite_repo_path
def schedule_fail(message, name=''):
"""
If an email address has been specified anywhere, send an alert there. Then
raise a ScheduleFailError.
"""
email = config.results_email
if email:
subject = "Failed to schedule {name}".format(name=name)
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = config.results_sending_email
msg['To'] = email
try:
smtp = smtplib.SMTP('localhost')
smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
smtp.quit()
except socket.error:
log.exception("Failed to connect to mail server!")
raise ScheduleFailError(message, name)
def get_worker(machine_type):
"""
Map a given machine_type to a beanstalkd worker. If machine_type mentions
multiple machine types - e.g. 'plana,mira', then this returns 'multi'.
Otherwise it returns what was passed.
"""
if ',' in machine_type:
return 'multi'
else:
return machine_type
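# Examples: get_worker('plana,mira') == 'multi'; get_worker('smithi') == 'smithi'.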
def get_gitbuilder_hash(project=None, branch=None, flavor=None,
machine_type=None, distro=None,
distro_version=None):
"""
Find the hash representing the head of the project's repository via
querying a gitbuilder repo.
Will return None in the case of a 404 or any other HTTP error.
"""
# Alternate method for github-hosted projects - left here for informational
# purposes
# resp = requests.get(
# 'https://api.github.com/repos/ceph/ceph/git/refs/heads/master')
    # hash = resp.json()['object']['sha']
(arch, release, _os) = get_distro_defaults(distro, machine_type)
if distro is None:
distro = _os.name
bp = get_builder_project()(
project,
dict(
branch=branch,
flavor=flavor,
os_type=distro,
os_version=distro_version,
arch=arch,
),
)
return bp.sha1
def get_distro_defaults(distro, machine_type):
"""
Given a distro (e.g. 'ubuntu') and machine type, return:
(arch, release, pkg_type)
This is used to default to:
('x86_64', 'trusty', 'deb') when passed 'ubuntu' and 'plana'
('armv7l', 'saucy', 'deb') when passed 'ubuntu' and 'saya'
('x86_64', 'wheezy', 'deb') when passed 'debian'
('x86_64', 'fedora20', 'rpm') when passed 'fedora'
And ('x86_64', 'centos7', 'rpm') when passed anything else
"""
arch = 'x86_64'
if distro in (None, 'None'):
os_type = 'centos'
os_version = '7'
elif distro in ('rhel', 'centos'):
os_type = 'centos'
os_version = '7'
elif distro == 'ubuntu':
os_type = distro
if machine_type == 'saya':
os_version = '13.10'
arch = 'armv7l'
else:
os_version = '16.04'
elif distro == 'debian':
os_type = distro
os_version = '7'
elif distro == 'fedora':
os_type = distro
os_version = '20'
elif distro == 'opensuse':
os_type = distro
os_version = '15.1'
else:
raise ValueError("Invalid distro value passed: %s", distro)
_os = OS(name=os_type, version=os_version)
release = get_builder_project()._get_distro(
_os.name,
_os.version,
_os.codename,
)
template = "Defaults for machine_type {mtype} distro {distro}: " \
"arch={arch}, release={release}, pkg_type={pkg}"
log.debug(template.format(
mtype=machine_type,
distro=_os.name,
arch=arch,
release=release,
pkg=_os.package_type)
)
return (
arch,
release,
_os,
)
def git_ls_remote(project_or_url, branch, project_owner='ceph'):
"""
Find the latest sha1 for a given project's branch.
:param project_or_url: Either a project name or a full URL
:param branch: The branch to query
:param project_owner: The GitHub project owner. Only used when a project
name is passed; not when a URL is passed
:returns: The sha1 if found; else None
"""
if '://' in project_or_url:
url = project_or_url
else:
url = build_git_url(project_or_url, project_owner)
return repo_utils.ls_remote(url, branch)
def git_validate_sha1(project, sha1, project_owner='ceph'):
'''
Use http to validate that project contains sha1
I can't find a way to do this with git, period, so
we have specific urls to HEAD for github and git.ceph.com/gitweb
for now
'''
url = build_git_url(project, project_owner)
if '/github.com/' in url:
url = '/'.join((url, 'commit', sha1))
elif '/git.ceph.com/' in url:
# kinda specific to knowing git.ceph.com is gitweb
url = ('http://git.ceph.com/?p=%s.git;a=blob_plain;f=.gitignore;hb=%s'
% (project, sha1))
else:
raise RuntimeError(
'git_validate_sha1: how do I check %s for a sha1?' % url
)
resp = requests.head(url)
if resp.ok:
return sha1
return None
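# Example (sha1 value is hypothetical):
#
#   if git_validate_sha1('ceph', some_sha1) is None:
#       pass  # the sha1 is not present in the project's repository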
def git_branch_exists(project_or_url, branch, project_owner='ceph'):
"""
Query the git repository to check the existence of a project's branch
:param project_or_url: Either a project name or a full URL
:param branch: The branch to query
:param project_owner: The GitHub project owner. Only used when a project
name is passed; not when a URL is passed
"""
return git_ls_remote(project_or_url, branch, project_owner) is not None
def get_branch_info(project, branch, project_owner='ceph'):
"""
NOTE: This is currently not being used because of GitHub's API rate
limiting. We use github_branch_exists() instead.
Use the GitHub API to query a project's branch. Returns:
{u'object': {u'sha': <a_sha_string>,
u'type': <string>,
u'url': <url_to_commit>},
u'ref': u'refs/heads/<branch>',
u'url': <url_to_branch>}
We mainly use this to check if a branch exists.
"""
url_templ = 'https://api.github.com/repos/{project_owner}/{project}/git/refs/heads/{branch}' # noqa
url = url_templ.format(project_owner=project_owner, project=project,
branch=branch)
resp = requests.get(url)
if resp.ok:
return resp.json()
def package_version_for_hash(hash, kernel_flavor='basic', distro='rhel',
distro_version='8.0', machine_type='smithi'):
"""
    Find the package version built for the given hash, flavor, distro and
    machine type, using gitbuilder repos.
    :returns: a version string, or None if a container build is not complete.
"""
(arch, release, _os) = get_distro_defaults(distro, machine_type)
if distro in (None, 'None'):
distro = _os.name
bp = get_builder_project()(
'ceph',
dict(
flavor=kernel_flavor,
os_type=distro,
os_version=distro_version,
arch=arch,
sha1=hash,
),
)
if bp.distro == CONTAINER_DISTRO and bp.flavor == CONTAINER_FLAVOR:
log.info('container build %s, checking for build_complete' % bp.distro)
if not bp.build_complete:
log.info('build not complete')
return None
return bp.version
def get_arch(machine_type):
"""
Based on a given machine_type, return its architecture by querying the lock
server.
:returns: A string or None
"""
result = teuthology.lock.query.list_locks(machine_type=machine_type, count=1)
if not result:
log.warn("No machines found with machine_type %s!", machine_type)
else:
return result[0]['arch']
def strip_fragment_path(original_path):
"""
Given a path, remove the text before '/suites/'. Part of the fix for
http://tracker.ceph.com/issues/15470
"""
scan_after = '/suites/'
scan_start = original_path.find(scan_after)
if scan_start > 0:
return original_path[scan_start + len(scan_after):]
return original_path
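# For example, a fragment path like
# '.../ceph-qa-suite/suites/rados/basic.yaml' becomes 'rados/basic.yaml'.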
def get_install_task_flavor(job_config):
"""
Pokes through the install task's configuration (including its overrides) to
figure out which flavor it will want to install.
Only looks at the first instance of the install task in job_config.
"""
    project = job_config.get('project', 'ceph')
tasks = job_config.get('tasks', dict())
overrides = job_config.get('overrides', dict())
install_overrides = overrides.get('install', dict())
project_overrides = install_overrides.get(project, dict())
first_install_config = dict()
for task in tasks:
if list(task.keys())[0] == 'install':
first_install_config = list(task.values())[0] or dict()
break
first_install_config = copy.deepcopy(first_install_config)
deep_merge(first_install_config, install_overrides)
deep_merge(first_install_config, project_overrides)
return get_flavor(first_install_config)
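# A sketch of the merge behavior above (config shown is illustrative, and
# assumes get_flavor() reads the 'flavor' key):
#
#   job_config = {
#       'project': 'ceph',
#       'tasks': [{'install': {'flavor': 'basic'}}],
#       'overrides': {'install': {'ceph': {'flavor': 'notcmalloc'}}},
#   }
#   get_install_task_flavor(job_config)  # -> 'notcmalloc'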
def get_package_versions(sha1, os_type, os_version, flavor,
package_versions=None):
"""
Will retrieve the package versions for the given sha1, os_type/version,
and flavor from gitbuilder.
Optionally, a package_versions dict can be provided
from previous calls to this function to avoid calling gitbuilder for
information we've already retrieved.
    The package_versions dict will be in the following format::
        {
            "sha1": {
                "ubuntu": {
                    "14.04": {
                        "basic": "version",
                    },
                    "15.04": {
                        "notcmalloc": "version",
                    },
                },
                "rhel": {
                    "7.0": {
                        "basic": "version",
                    },
                },
            },
            "another-sha1": {
                "ubuntu": {
                    "14.04": {
                        "basic": "version",
                    },
                },
            },
        }
:param sha1: The sha1 hash of the ceph version.
:param os_type: The distro we want to get packages for, given
the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
:param os_version: The distro's version, e.g. '14.04', '7.0'
:param flavor: Package flavor ('testing', 'notcmalloc', etc.)
:param package_versions: Use this optionally to use cached results of
previous calls to gitbuilder.
:returns: A dict of package versions. Will return versions
for all hashes/distros/vers, not just for the given
hash/distro/ver.
"""
if package_versions is None:
package_versions = dict()
os_type = str(os_type)
os_types = package_versions.get(sha1, dict())
os_versions = os_types.get(os_type, dict())
flavors = os_versions.get(os_version, dict())
if flavor not in flavors:
package_version = package_version_for_hash(
sha1,
flavor,
distro=os_type,
distro_version=os_version,
)
flavors[flavor] = package_version
os_versions[os_version] = flavors
os_types[os_type] = os_versions
package_versions[sha1] = os_types
return package_versions
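# Example of reusing the cache across calls (sha1 value is hypothetical):
#
#   vers = get_package_versions('some-sha1', 'ubuntu', '14.04', 'basic')
#   # A second call with the previous result avoids re-querying gitbuilder:
#   vers = get_package_versions('some-sha1', 'ubuntu', '14.04', 'basic',
#                               package_versions=vers)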
def has_packages_for_distro(sha1, os_type, os_version, flavor,
package_versions=None):
"""
    Checks to see if gitbuilder has packages for the given sha1, os_type,
    os_version and flavor.
    See get_package_versions() above for the package_versions format.
    :param sha1:             The sha1 hash of the ceph version.
    :param os_type:          The distro we want to get packages for, given
                             the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
    :param os_version:       The distro's version, e.g. '14.04', '7.0'
    :param flavor:           The package flavor ('basic', 'notcmalloc', etc.)
    :param package_versions: Use this optionally to use cached results of
                             previous calls to gitbuilder.
:returns: True, if packages are found. False otherwise.
"""
os_type = str(os_type)
if package_versions is None:
package_versions = get_package_versions(
sha1, os_type, os_version, flavor)
flavors = package_versions.get(sha1, dict()).get(
os_type, dict()).get(
os_version, dict())
# we want to return a boolean here, not the actual package versions
return bool(flavors.get(flavor, None))
def teuthology_schedule(args, verbose, dry_run, log_prefix=''):
"""
Run teuthology-schedule to schedule individual jobs.
    If --dry-run has been passed, print the command that would be executed
    instead of running it.
    If --dry-run has been passed and --verbose has been passed more than
    once, print the command and run it.
"""
exec_path = os.path.join(
os.path.dirname(sys.argv[0]),
'teuthology-schedule')
args.insert(0, exec_path)
if dry_run:
        # Quote args containing spaces so that the printed command can be
        # copied, pasted and executed as-is.
printable_args = []
for item in args:
if ' ' in item:
printable_args.append("'%s'" % item)
else:
printable_args.append(item)
log.info('{0}{1}'.format(
log_prefix,
' '.join(printable_args),
))
if not dry_run or (dry_run and verbose > 1):
subprocess.check_call(args=args)
def find_git_parent(project, sha1):
base_url = config.githelper_base_url
if not base_url:
log.warning('githelper_base_url not set, --newest disabled')
return None
def refresh(project):
url = '%s/%s.git/refresh' % (base_url, project)
resp = requests.get(url)
if not resp.ok:
log.error('git refresh failed for %s: %s',
project, resp.content.decode())
def get_sha1s(project, committish, count):
url = '/'.join((base_url, '%s.git' % project,
'history/?committish=%s&count=%d' % (committish, count)))
resp = requests.get(url)
resp.raise_for_status()
sha1s = resp.json()['sha1s']
if len(sha1s) != count:
log.debug('got response: %s', resp.json())
            log.error("can't find %d parents of %s in %s: %s",
                      int(count), sha1, project, resp.json()['error'])
return sha1s
# XXX don't do this every time?..
refresh(project)
# we want the one just before sha1; list two, return the second
sha1s = get_sha1s(project, sha1, 2)
if len(sha1s) == 2:
return sha1s[1]
else:
return None
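# Example usage (sha1 value is hypothetical; requires
# config.githelper_base_url to be set):
#
#   parent = find_git_parent('ceph', 'abcdef0123')
#   # parent is the sha1 of the commit just before 'abcdef0123', or None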
def filter_configs(configs, suite_name=None,
filter_in=None,
filter_out=None,
filter_all=None,
filter_fragments=True):
"""
Returns a generator for pairs of description and fragment paths.
Usage:
configs = build_matrix(path, subset, seed)
for description, fragments in filter_configs(configs):
pass
"""
for item in configs:
fragment_paths = item[1]
description = combine_path(suite_name, item[0]) \
if suite_name else item[0]
base_frag_paths = [strip_fragment_path(x)
for x in fragment_paths]
def matches(f):
if f in description:
return True
if filter_fragments and \
any(f in path for path in base_frag_paths):
return True
return False
if filter_all:
if not all(matches(f) for f in filter_all):
continue
if filter_in:
if not any(matches(f) for f in filter_in):
continue
if filter_out:
if any(matches(f) for f in filter_out):
continue
        yield [description, fragment_paths]
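# Filtering example (filter strings are illustrative): keep only configs
# whose description or fragment paths mention 'rados', excluding anything
# mentioning 'valgrind':
#
#   for description, fragments in filter_configs(configs,
#                                                filter_in=['rados'],
#                                                filter_out=['valgrind']):
#       pass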
|
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
from abc import abstractproperty
from builtins import bytes, str
from collections import OrderedDict
from future.utils import PY2
from twitter.common.collections import OrderedSet
from pants.engine.selectors import Get, type_or_constraint_repr
from pants.util.meta import AbstractClass
from pants.util.objects import Exactly, datatype
logger = logging.getLogger(__name__)
class _RuleVisitor(ast.NodeVisitor):
def __init__(self):
super(_RuleVisitor, self).__init__()
self.gets = []
def visit_Call(self, node):
if not isinstance(node.func, ast.Name) or node.func.id != Get.__name__:
return
self.gets.append(Get.extract_constraints(node))
class GoalProduct(object):
PRODUCT_MAP = {}
@staticmethod
def _synthesize_goal_product(name):
product_type_name = '{}GoalExecution'.format(name.capitalize())
if PY2:
product_type_name = product_type_name.encode('utf-8')
return type(product_type_name, (datatype(['result']),), {})
@classmethod
def for_name(cls, name):
assert isinstance(name, (bytes, str))
    if isinstance(name, bytes):
name = name.decode('utf-8')
if name not in cls.PRODUCT_MAP:
cls.PRODUCT_MAP[name] = cls._synthesize_goal_product(name)
return cls.PRODUCT_MAP[name]
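# For example, GoalProduct.for_name('list') synthesizes (and caches) a
# datatype named 'ListGoalExecution' with a single 'result' field.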
def _make_rule(output_type, input_selectors, for_goal=None):
"""A @decorator that declares that a particular static function may be used as a TaskRule.
:param Constraint output_type: The return/output type for the Rule. This may be either a
concrete Python type, or an instance of `Exactly` representing a union of multiple types.
:param list input_selectors: A list of Selector instances that matches the number of arguments
to the @decorated function.
:param str for_goal: If this is a @console_rule, which goal string it's called for.
"""
def wrapper(func):
if not inspect.isfunction(func):
raise ValueError('The @rule decorator must be applied innermost of all decorators.')
caller_frame = inspect.stack()[1][0]
module_ast = ast.parse(inspect.getsource(func))
def resolve_type(name):
resolved = caller_frame.f_globals.get(name) or caller_frame.f_builtins.get(name)
if not isinstance(resolved, (type, Exactly)):
        raise ValueError('Expected either a `type` constructor or `Exactly` instance; '
                         'got: {}'.format(name))
return resolved
gets = OrderedSet()
for node in ast.iter_child_nodes(module_ast):
if isinstance(node, ast.FunctionDef) and node.name == func.__name__:
rule_visitor = _RuleVisitor()
rule_visitor.visit(node)
gets.update(Get(resolve_type(p), resolve_type(s)) for p, s in rule_visitor.gets)
func._rule = TaskRule(output_type, input_selectors, func, input_gets=list(gets))
func.output_type = output_type
func.goal = for_goal
return func
return wrapper
def rule(output_type, input_selectors):
return _make_rule(output_type, input_selectors)
def console_rule(goal_name, input_selectors):
output_type = GoalProduct.for_name(goal_name)
return _make_rule(output_type, input_selectors, goal_name)
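# A minimal sketch of the decorator in use, assuming a `Select` selector is
# importable from pants.engine.selectors (not imported in this module) and
# that `IntValue`/`StrValue` are datatypes defined elsewhere:
#
#   @rule(StrValue, [Select(IntValue)])
#   def int_to_str(an_int):
#     return StrValue(str(an_int.value))
#
# The wrapper parses the function source for `Get(...)` calls, records them
# as input_gets, and attaches the resulting TaskRule as `int_to_str._rule`.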
class Rule(AbstractClass):
"""Rules declare how to produce products for the product graph.
A rule describes what dependencies must be provided to produce a particular product. They also act
as factories for constructing the nodes within the graph.
"""
@abstractproperty
def output_constraint(self):
"""An output Constraint type for the rule."""
@abstractproperty
def input_selectors(self):
"""Collection of input selectors."""
class TaskRule(datatype(['output_constraint', 'input_selectors', 'input_gets', 'func']), Rule):
"""A Rule that runs a task function when all of its input selectors are satisfied.
TODO: Make input_gets non-optional when more/all rules are using them.
"""
def __new__(cls, output_type, input_selectors, func, input_gets=None):
# Validate result type.
if isinstance(output_type, Exactly):
constraint = output_type
elif isinstance(output_type, type):
constraint = Exactly(output_type)
else:
raise TypeError("Expected an output_type for rule `{}`, got: {}".format(
func.__name__, output_type))
# Validate selectors.
if not isinstance(input_selectors, list):
raise TypeError("Expected a list of Selectors for rule `{}`, got: {}".format(
func.__name__, type(input_selectors)))
# Validate gets.
input_gets = [] if input_gets is None else input_gets
if not isinstance(input_gets, list):
raise TypeError("Expected a list of Gets for rule `{}`, got: {}".format(
func.__name__, type(input_gets)))
# Create.
return super(TaskRule, cls).__new__(cls, constraint, tuple(input_selectors), tuple(input_gets), func)
def __str__(self):
return '({}, {!r}, {})'.format(type_or_constraint_repr(self.output_constraint),
self.input_selectors,
self.func.__name__)
class SingletonRule(datatype(['output_constraint', 'value']), Rule):
"""A default rule for a product, which is thus a singleton for that product."""
@classmethod
def from_instance(cls, obj):
return cls(type(obj), obj)
def __new__(cls, output_type, value):
# Validate result type.
if isinstance(output_type, Exactly):
constraint = output_type
elif isinstance(output_type, type):
constraint = Exactly(output_type)
else:
raise TypeError("Expected an output_type for rule; got: {}".format(output_type))
# Create.
return super(SingletonRule, cls).__new__(cls, constraint, value)
@property
def input_selectors(self):
return tuple()
def __repr__(self):
return '{}({}, {})'.format(type(self).__name__, type_or_constraint_repr(self.output_constraint), self.value)
class RootRule(datatype(['output_constraint']), Rule):
"""Represents a root input to an execution of a rule graph.
Roots act roughly like parameters, in that in some cases the only source of a
particular type might be when a value is provided as a root subject at the beginning
of an execution.
"""
  @property
  def input_selectors(self):
    return []
class RuleIndex(datatype(['rules', 'roots'])):
"""Holds an index of Tasks and Singletons used to instantiate Nodes."""
@classmethod
def create(cls, rule_entries):
"""Creates a RuleIndex with tasks indexed by their output type."""
    # NB: Keep tasks ordered so that the generated ordering is deterministic.
serializable_rules = OrderedDict()
serializable_roots = set()
def add_task(product_type, rule):
if product_type not in serializable_rules:
serializable_rules[product_type] = OrderedSet()
serializable_rules[product_type].add(rule)
def add_rule(rule):
if isinstance(rule, RootRule):
serializable_roots.add(rule.output_constraint)
return
# TODO: Ensure that interior types work by indexing on the list of types in
      # the constraint. This heterogeneity has some confusing implications:
# see https://github.com/pantsbuild/pants/issues/4005
for kind in rule.output_constraint.types:
add_task(kind, rule)
add_task(rule.output_constraint, rule)
for entry in rule_entries:
if isinstance(entry, Rule):
add_rule(entry)
elif hasattr(entry, '__call__'):
rule = getattr(entry, '_rule', None)
if rule is None:
raise TypeError("Expected callable {} to be decorated with @rule.".format(entry))
add_rule(rule)
else:
raise TypeError("Unexpected rule type: {}. "
"Rules either extend Rule, or are static functions "
"decorated with @rule.".format(type(entry)))
return cls(serializable_rules, serializable_roots)
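# A sketch of building an index (entries shown are illustrative):
#
#   index = RuleIndex.create([
#       RootRule(Address),                  # a root input type
#       SingletonRule.from_instance(opts),  # a default/singleton product
#       my_rule_function,                   # a function decorated with @rule
#   ])
#
# Tasks end up keyed by output type in index.rules; root constraints are
# collected in index.roots.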
|
|
import numpy as np
import rdkit
import tensorflow as tf
from tensorflow.python.framework import test_util
from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
from deepchem.models.tensorgraph.layers import Add, MaxPool2D, MaxPool3D, GraphCNN, GraphEmbedPoolLayer, Cast
from deepchem.models.tensorgraph.layers import AlphaShareLayer
from deepchem.models.tensorgraph.layers import AttnLSTMEmbedding
from deepchem.models.tensorgraph.layers import BatchNorm
from deepchem.models.tensorgraph.layers import BetaShare
from deepchem.models.tensorgraph.layers import CombineMeanStd
from deepchem.models.tensorgraph.layers import Concat
from deepchem.models.tensorgraph.layers import Constant
from deepchem.models.tensorgraph.layers import Conv1D, Squeeze
from deepchem.models.tensorgraph.layers import Conv2D
from deepchem.models.tensorgraph.layers import Conv2DTranspose
from deepchem.models.tensorgraph.layers import Conv3D
from deepchem.models.tensorgraph.layers import Conv3DTranspose
from deepchem.models.tensorgraph.layers import Dense
from deepchem.models.tensorgraph.layers import Exp
from deepchem.models.tensorgraph.layers import Flatten
from deepchem.models.tensorgraph.layers import GRU
from deepchem.models.tensorgraph.layers import Gather
from deepchem.models.tensorgraph.layers import GraphConv
from deepchem.models.tensorgraph.layers import GraphGather
from deepchem.models.tensorgraph.layers import HingeLoss
from deepchem.models.tensorgraph.layers import Input
from deepchem.models.tensorgraph.layers import InputFifoQueue
from deepchem.models.tensorgraph.layers import InteratomicL2Distances
from deepchem.models.tensorgraph.layers import IterRefLSTMEmbedding
from deepchem.models.tensorgraph.layers import L2Loss
from deepchem.models.tensorgraph.layers import LSTM
from deepchem.models.tensorgraph.layers import LSTMStep
from deepchem.models.tensorgraph.layers import Log
from deepchem.models.tensorgraph.layers import Multiply
from deepchem.models.tensorgraph.layers import ReduceMean
from deepchem.models.tensorgraph.layers import ReduceSquareDifference
from deepchem.models.tensorgraph.layers import ReduceSum
from deepchem.models.tensorgraph.layers import ReLU
from deepchem.models.tensorgraph.layers import Repeat
from deepchem.models.tensorgraph.layers import Reshape
from deepchem.models.tensorgraph.layers import SluiceLoss
from deepchem.models.tensorgraph.layers import Sigmoid
from deepchem.models.tensorgraph.layers import SigmoidCrossEntropy
from deepchem.models.tensorgraph.layers import SoftMax
from deepchem.models.tensorgraph.layers import SoftMaxCrossEntropy
from deepchem.models.tensorgraph.layers import SparseSoftMaxCrossEntropy
from deepchem.models.tensorgraph.layers import StopGradient
from deepchem.models.tensorgraph.layers import TensorWrapper
from deepchem.models.tensorgraph.layers import TimeSeriesDense
from deepchem.models.tensorgraph.layers import ToFloat
from deepchem.models.tensorgraph.layers import Transpose
from deepchem.models.tensorgraph.layers import Variable
from deepchem.models.tensorgraph.layers import VinaFreeEnergy
from deepchem.models.tensorgraph.layers import WeightedError
from deepchem.models.tensorgraph.layers import WeightedLinearCombo
from deepchem.models.tensorgraph.IRV import IRVLayer
from deepchem.models.tensorgraph.IRV import IRVRegularize
from deepchem.models.tensorgraph.IRV import Slice
class TestLayers(test_util.TensorFlowTestCase):
"""
Test that layers function as intended.
"""
def test_conv_1D(self):
"""Test that Conv1D can be invoked."""
width = 5
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
in_tensor = np.random.rand(batch_size, width, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Conv1D(filters, kernel_size)(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
self.assertEqual(out_tensor.shape[0], batch_size)
self.assertEqual(out_tensor.shape[2], filters)
def test_dense(self):
"""Test that Dense can be invoked."""
in_dim = 2
out_dim = 3
batch_size = 10
in_tensor = np.random.rand(batch_size, in_dim)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Dense(out_dim)(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, out_dim)
def test_flatten(self):
"""Test that Flatten can be invoked."""
in_dim_1 = 2
in_dim_2 = 2
out_dim = 4
batch_size = 10
in_tensor = np.random.rand(batch_size, in_dim_1, in_dim_2)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Flatten()(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, out_dim)
def test_reshape(self):
"""Test that Reshape can be invoked."""
in_dim_1 = 2
in_dim_2 = 2
out_dim = 4
batch_size = 10
in_tensor = np.random.rand(batch_size, in_dim_1, in_dim_2)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Reshape((batch_size, out_dim))(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, out_dim)
def test_transpose(self):
"""Test that Transpose can be invoked."""
in_dim_1 = 2
in_dim_2 = 7
batch_size = 10
in_tensor = np.random.rand(batch_size, in_dim_1, in_dim_2)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Transpose((0, 2, 1))(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, in_dim_2, in_dim_1)
def test_combine_mean_std(self):
"""Test that Transpose can be invoked."""
dim = 2
batch_size = 10
mean_tensor = np.random.rand(dim)
std_tensor = np.random.rand(1,)
with self.test_session() as sess:
mean_tensor = tf.convert_to_tensor(mean_tensor, dtype=tf.float32)
std_tensor = tf.convert_to_tensor(std_tensor, dtype=tf.float32)
out_tensor = CombineMeanStd()(mean_tensor, std_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (dim,)
def test_repeat(self):
"""Test that Repeat can be invoked."""
in_dim = 4
batch_size = 10
n_repeat = 2
in_tensor = np.random.rand(batch_size, in_dim)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Repeat(n_repeat)(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_repeat, in_dim)
def test_gather(self):
"""Test that Gather can be invoked."""
in_tensor = np.random.uniform(size=(5, 4)).astype(np.float32)
with self.test_session() as sess:
out_tensor = Gather(indices=[[2], [3]])(in_tensor).eval()
assert np.array_equal([in_tensor[2], in_tensor[3]], out_tensor)
out_tensor = Gather()(in_tensor, np.array([[1, 1], [0, 3]])).eval()
assert np.array_equal([in_tensor[1, 1], in_tensor[0, 3]], out_tensor)
def test_gru(self):
"""Test that GRU can be invoked."""
batch_size = 10
n_hidden = 7
in_channels = 4
n_repeat = 2
n_steps = 6
in_tensor = np.random.rand(batch_size, n_steps, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = GRU(n_hidden, batch_size)(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_steps, n_hidden)
def test_lstm(self):
"""Test that LSTM can be invoked."""
batch_size = 10
n_hidden = 7
in_channels = 4
n_repeat = 2
n_steps = 6
in_tensor = np.random.rand(batch_size, n_steps, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = LSTM(n_hidden, batch_size)(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_steps, n_hidden)
def test_time_series_dense(self):
"""Test that TimeSeriesDense can be invoked."""
batch_size = 10
n_hidden = 7
in_channels = 4
out_channels = 5
n_repeat = 2
n_steps = 6
in_tensor = np.random.rand(batch_size, n_steps, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = TimeSeriesDense(out_channels)(in_tensor)
assert out_tensor.shape == (batch_size, n_steps, out_channels)
def test_input(self):
"""Test that Input can be invoked."""
in_shape = (4, 3)
with self.test_session() as sess:
out_tensor = Input(in_shape)()
def test_l2_loss(self):
"""Test that L2Loss can be invoked."""
batch_size = 10
n_features = 5
guess_tensor = np.random.rand(batch_size, n_features)
label_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
guess_tensor = tf.convert_to_tensor(guess_tensor, dtype=tf.float32)
label_tensor = tf.convert_to_tensor(label_tensor, dtype=tf.float32)
out_tensor = L2Loss()(guess_tensor, label_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size,)
def test_relu(self):
"""Test that Sigmoid can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = ReLU()(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_features)
def test_sigmoid(self):
"""Test that Sigmoid can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Sigmoid()(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_features)
def test_softmax(self):
"""Test that Softmax can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = SoftMax()(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_features)
def test_concat(self):
"""Test that Concat can be invoked."""
batch_size = 10
n_features = 5
in_tensor_1 = np.random.rand(batch_size, n_features)
in_tensor_2 = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor_1 = tf.convert_to_tensor(in_tensor_1, dtype=tf.float32)
in_tensor_2 = tf.convert_to_tensor(in_tensor_2, dtype=tf.float32)
out_tensor = Concat(axis=1)(in_tensor_1, in_tensor_2)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, 2 * n_features)
def test_constant(self):
"""Test that Constant can be invoked."""
value = np.random.uniform(size=(2, 3)).astype(np.float32)
with self.test_session() as sess:
out_tensor = Constant(value)()
assert np.array_equal(value, out_tensor.eval())
def test_variable(self):
"""Test that Variable can be invoked."""
value = np.random.uniform(size=(2, 3)).astype(np.float32)
with self.test_session() as sess:
out_tensor = Variable(value)()
sess.run(tf.global_variables_initializer())
assert np.array_equal(value, out_tensor.eval())
def test_stop_gradient(self):
"""Test that StopGradient can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = StopGradient()(in_tensor)
assert np.array_equal(in_tensor.eval(), out_tensor.eval())
def test_add(self):
"""Test that Add can be invoked."""
value1 = np.random.uniform(size=(2, 3)).astype(np.float32)
value2 = np.random.uniform(size=(2, 3)).astype(np.float32)
value3 = np.random.uniform(size=(2, 3)).astype(np.float32)
with self.test_session() as sess:
out_tensor = Add(weights=[1, 2, 1])(tf.constant(value1),
tf.constant(value2),
tf.constant(value3))
assert np.array_equal(value1 + 2 * value2 + value3, out_tensor.eval())
def test_multiply(self):
"""Test that Multiply can be invoked."""
value1 = np.random.uniform(size=(2, 3)).astype(np.float32)
value2 = np.random.uniform(size=(2, 3)).astype(np.float32)
value3 = np.random.uniform(size=(2, 3)).astype(np.float32)
with self.test_session() as sess:
out_tensor = Multiply()(tf.constant(value1), tf.constant(value2),
tf.constant(value3))
assert np.array_equal(value1 * value2 * value3, out_tensor.eval())
def test_log(self):
"""Test that Log can be invoked."""
value = np.random.uniform(size=(2, 3)).astype(np.float32)
with self.test_session() as sess:
result = Log()(value).eval()
      # exact equality can fail due to float rounding; compare approximately
      assert np.all(np.isclose(np.log(value), result, atol=0.001))
def test_exp(self):
"""Test that Exp can be invoked."""
value = np.random.uniform(size=(2, 3)).astype(np.float32)
with self.test_session() as sess:
result = Exp()(value).eval()
      assert np.all(np.isclose(np.exp(value), result, atol=0.001))
def test_interatomic_distances(self):
"""Test that the interatomic distance calculation works."""
N_atoms = 5
M_nbrs = 2
ndim = 3
with self.test_session() as sess:
coords = np.random.rand(N_atoms, ndim)
nbr_list = np.random.randint(0, N_atoms, size=(N_atoms, M_nbrs))
coords_tensor = tf.convert_to_tensor(coords)
nbr_list_tensor = tf.convert_to_tensor(nbr_list)
dist_tensor = InteratomicL2Distances(N_atoms, M_nbrs,
ndim)(coords_tensor, nbr_list_tensor)
dists = dist_tensor.eval()
assert dists.shape == (N_atoms, M_nbrs)
def test_sigmoid_cross_entropy(self):
"""Test that SigmoidCrossEntropy can be invoked."""
batch_size = 10
n_features = 5
logit_tensor = np.random.rand(batch_size, n_features)
label_tensor = np.random.randint(0, 2, (batch_size, n_features))
with self.test_session() as sess:
logit_tensor = tf.convert_to_tensor(logit_tensor, dtype=tf.float32)
label_tensor = tf.convert_to_tensor(label_tensor, dtype=tf.float32)
out_tensor = SigmoidCrossEntropy()(label_tensor, logit_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_features)
def test_softmax_cross_entropy(self):
"""Test that SoftMaxCrossEntropy can be invoked."""
batch_size = 10
n_features = 5
logit_tensor = np.random.rand(batch_size, n_features)
label_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
logit_tensor = tf.convert_to_tensor(logit_tensor, dtype=tf.float32)
label_tensor = tf.convert_to_tensor(label_tensor, dtype=tf.float32)
out_tensor = SoftMaxCrossEntropy()(logit_tensor, label_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size,)
def test_sparse_softmax_cross_entropy(self):
batch_size = 10
n_features = 5
logit_tensor = np.random.rand(batch_size, n_features)
label_tensor = np.random.rand(batch_size)
with self.test_session() as sess:
logit_tensor = tf.convert_to_tensor(logit_tensor, dtype=tf.float32)
label_tensor = tf.convert_to_tensor(label_tensor, dtype=tf.int32)
out_tensor = SparseSoftMaxCrossEntropy()(label_tensor, logit_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size,)
def test_reduce_mean(self):
"""Test that ReduceMean can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = ReduceMean()(in_tensor)
out_tensor = out_tensor.eval()
assert isinstance(out_tensor, np.float32)
def test_to_float(self):
"""Test that ToFloat can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = ToFloat()(in_tensor)
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_features)
def test_reduce_sum(self):
"""Test that ReduceSum can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = ReduceSum()(in_tensor)
out_tensor = out_tensor.eval()
assert isinstance(out_tensor, np.float32)
def test_reduce_square_difference(self):
"""Test that ReduceSquareDifference can be invoked."""
batch_size = 10
n_features = 5
in_tensor_1 = np.random.rand(batch_size, n_features)
in_tensor_2 = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor_1 = tf.convert_to_tensor(in_tensor_1, dtype=tf.float32)
in_tensor_2 = tf.convert_to_tensor(in_tensor_2, dtype=tf.float32)
out_tensor = ReduceSquareDifference()(in_tensor_1, in_tensor_2)
out_tensor = out_tensor.eval()
assert isinstance(out_tensor, np.float32)
def test_conv_2D(self):
"""Test that Conv2D can be invoked."""
length = 4
width = 5
in_channels = 2
out_channels = 3
batch_size = 20
in_tensor = np.random.rand(batch_size, length, width, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Conv2D(out_channels, kernel_size=1)(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, length, width, out_channels)
def test_conv_3D(self):
"""Test that Conv3D can be invoked."""
length = 4
width = 5
depth = 6
in_channels = 2
out_channels = 3
batch_size = 20
in_tensor = np.random.rand(batch_size, length, width, depth, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Conv3D(out_channels, kernel_size=1)(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, length, width, depth,
out_channels)
def test_conv_2D_transpose(self):
"""Test that Conv2DTranspose can be invoked."""
length = 4
width = 5
in_channels = 2
out_channels = 3
batch_size = 20
in_tensor = np.random.rand(batch_size, length, width, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Conv2DTranspose(
out_channels, kernel_size=1, stride=2)(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, 2 * length, 2 * width,
out_channels)
def test_conv_3D_transpose(self):
"""Test that Conv3DTranspose can be invoked."""
length = 4
width = 5
depth = 6
in_channels = 2
out_channels = 3
batch_size = 20
in_tensor = np.random.rand(batch_size, length, width, depth, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = Conv3DTranspose(
out_channels, kernel_size=1, stride=(2, 3, 1))(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, 2 * length, 3 * width, depth,
out_channels)
def test_maxpool2D(self):
"""Test that MaxPool2D can be invoked."""
length = 2
width = 2
in_channels = 2
batch_size = 20
in_tensor = np.random.rand(batch_size, length, width, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = MaxPool2D()(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, 1, 1, in_channels)
def test_max_pool_3D(self):
"""Test that MaxPool3D can be invoked."""
length = 2
width = 2
depth = 2
in_channels = 2
batch_size = 20
in_tensor = np.random.rand(batch_size, length, width, depth, in_channels)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = MaxPool3D()(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, 1, 1, 1, in_channels)
def test_input_fifo_queue(self):
"""Test InputFifoQueue can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
tf.reset_default_graph()
with self.test_session() as sess:
in_tensor = TensorWrapper(
tf.convert_to_tensor(in_tensor, dtype=tf.float32), name="input")
InputFifoQueue([(batch_size, n_features)], ["input"])(in_tensor)
def test_graph_conv(self):
"""Test that GraphConv can be invoked."""
out_channels = 2
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features()
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
with self.test_session() as sess:
atom_features = tf.convert_to_tensor(atom_features, dtype=tf.float32)
degree_slice = tf.convert_to_tensor(degree_slice, dtype=tf.int32)
membership = tf.convert_to_tensor(membership, dtype=tf.int32)
deg_adjs_tf = []
for deg_adj in deg_adjs:
deg_adjs_tf.append(tf.convert_to_tensor(deg_adj, dtype=tf.int32))
args = [atom_features, degree_slice, membership] + deg_adjs_tf
out_tensor = GraphConv(out_channels)(*args)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (n_atoms, out_channels)
def test_lstm_step(self):
"""Test that LSTMStep computation works properly."""
max_depth = 5
n_test = 5
n_feat = 10
y = np.random.rand(n_test, 2 * n_feat)
state_zero = np.random.rand(n_test, n_feat)
state_one = np.random.rand(n_test, n_feat)
with self.test_session() as sess:
y = tf.convert_to_tensor(y, dtype=tf.float32)
state_zero = tf.convert_to_tensor(state_zero, dtype=tf.float32)
state_one = tf.convert_to_tensor(state_one, dtype=tf.float32)
lstm = LSTMStep(n_feat, 2 * n_feat)
out_tensor = lstm(y, state_zero, state_one)
sess.run(tf.global_variables_initializer())
h_out, h_copy_out, c_out = (out_tensor[0].eval(), out_tensor[1][0].eval(),
out_tensor[1][1].eval())
assert h_out.shape == (n_test, n_feat)
assert h_copy_out.shape == (n_test, n_feat)
assert c_out.shape == (n_test, n_feat)
def test_attn_lstm_embedding(self):
"""Test that attention LSTM computation works properly."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat)
support = np.random.rand(n_support, n_feat)
with self.test_session() as sess:
test = tf.convert_to_tensor(test, dtype=tf.float32)
support = tf.convert_to_tensor(support, dtype=tf.float32)
attn_embedding_layer = AttnLSTMEmbedding(n_test, n_support, n_feat,
max_depth)
out_tensor = attn_embedding_layer(test, support)
sess.run(tf.global_variables_initializer())
test_out, support_out = out_tensor[0].eval(), out_tensor[1].eval()
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
def test_iter_ref_lstm_embedding(self):
"""Test that IterRef LSTM computation works properly."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat)
support = np.random.rand(n_support, n_feat)
with self.test_session() as sess:
test = tf.convert_to_tensor(test, dtype=tf.float32)
support = tf.convert_to_tensor(support, dtype=tf.float32)
iter_ref_embedding_layer = IterRefLSTMEmbedding(n_test, n_support, n_feat,
max_depth)
out_tensor = iter_ref_embedding_layer(test, support)
sess.run(tf.global_variables_initializer())
test_out, support_out = out_tensor[0].eval(), out_tensor[1].eval()
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
# TODO(rbharath): This test should pass. Fix it!
# def test_graph_pool(self):
# """Test that GraphPool can be invoked."""
# out_channels = 2
# n_atoms = 4 # In CCC and C, there are 4 atoms
# raw_smiles = ['CCC', 'C']
# mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
# featurizer = ConvMolFeaturizer()
# mols = featurizer.featurize(mols)
# multi_mol = ConvMol.agglomerate_mols(mols)
# atom_features = multi_mol.get_atom_features()
# degree_slice = multi_mol.deg_slice
# membership = multi_mol.membership
# deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
# with self.test_session() as sess:
# atom_features = tf.convert_to_tensor(atom_features, dtype=tf.float32)
# degree_slice = tf.convert_to_tensor(degree_slice, dtype=tf.int32)
# membership = tf.convert_to_tensor(membership, dtype=tf.int32)
# deg_adjs_tf = []
# for deg_adj in deg_adjs:
# deg_adjs_tf.append(tf.convert_to_tensor(deg_adj, dtype=tf.int32))
# args = [atom_features, degree_slice, membership] + deg_adjs_tf
# out_tensor = GraphPool(out_channels)(*args)
# sess.run(tf.global_variables_initializer())
# out_tensor = out_tensor.eval()
# assert out_tensor.shape == (n_atoms, out_channels)
def test_graph_gather(self):
"""Test that GraphGather can be invoked."""
batch_size = 2
n_features = 75
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features()
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
with self.test_session() as sess:
atom_features = tf.convert_to_tensor(atom_features, dtype=tf.float32)
degree_slice = tf.convert_to_tensor(degree_slice, dtype=tf.int32)
membership = tf.convert_to_tensor(membership, dtype=tf.int32)
deg_adjs_tf = []
for deg_adj in deg_adjs:
deg_adjs_tf.append(tf.convert_to_tensor(deg_adj, dtype=tf.int32))
args = [atom_features, degree_slice, membership] + deg_adjs_tf
out_tensor = GraphGather(batch_size)(*args)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
# TODO(rbharath): Why is it 2*n_features instead of n_features?
assert out_tensor.shape == (batch_size, 2 * n_features)
def test_batch_norm(self):
"""Test that BatchNorm can be invoked."""
batch_size = 10
n_features = 5
in_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
out_tensor = BatchNorm()(in_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_features)
def test_weighted_error(self):
"""Test that WeightedError can be invoked."""
batch_size = 10
n_features = 5
guess_tensor = np.random.rand(batch_size, n_features)
label_tensor = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
guess_tensor = tf.convert_to_tensor(guess_tensor, dtype=tf.float32)
label_tensor = tf.convert_to_tensor(label_tensor, dtype=tf.float32)
out_tensor = WeightedError()(guess_tensor, label_tensor)
out_tensor = out_tensor.eval()
assert isinstance(out_tensor, np.float32)
def test_vina_free_energy(self):
"""Test that VinaFreeEnergy can be invoked."""
n_atoms = 5
m_nbrs = 1
ndim = 3
nbr_cutoff = 1
start = 0
stop = 4
X_tensor = np.random.rand(n_atoms, ndim)
Z_tensor = np.random.randint(0, 2, (n_atoms))
with self.test_session() as sess:
X_tensor = tf.convert_to_tensor(X_tensor, dtype=tf.float32)
Z_tensor = tf.convert_to_tensor(Z_tensor, dtype=tf.float32)
out_tensor = VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)(X_tensor, Z_tensor)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert isinstance(out_tensor, np.float32)
def test_weighted_linear_combo(self):
"""Test that WeightedLinearCombo can be invoked."""
batch_size = 10
n_features = 5
in_tensor_1 = np.random.rand(batch_size, n_features)
in_tensor_2 = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
in_tensor_1 = tf.convert_to_tensor(in_tensor_1, dtype=tf.float32)
in_tensor_2 = tf.convert_to_tensor(in_tensor_2, dtype=tf.float32)
out_tensor = WeightedLinearCombo()(in_tensor_1, in_tensor_2)
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_features)
def test_reshape_inputs(self):
"""Test that layers can automatically reshape inconsistent inputs."""
value1 = np.random.uniform(size=(2, 3)).astype(np.float32)
value2 = np.random.uniform(size=(1, 6, 1)).astype(np.float32)
with self.test_session() as sess:
out_tensor = ReduceSquareDifference()(tf.constant(value1),
tf.constant(value2))
result = out_tensor.eval()
diff = value1.reshape((1, 6, 1)) - value2
loss = np.mean(diff**2)
      assert abs(loss - result) / loss < 1e-6
def test_cast(self):
"""Test that layers can automatically reshape inconsistent inputs."""
value1 = np.random.uniform(size=(2, 1)).astype(np.float32)
with self.test_session() as sess:
out_tensor = Cast(dtype=tf.int32)(tf.constant(value1))
result = out_tensor.eval()
assert result.dtype == np.int32
def test_squeeze_inputs(self):
"""Test that layers can automatically reshape inconsistent inputs."""
value1 = np.random.uniform(size=(2, 1)).astype(np.float32)
with self.test_session() as sess:
out_tensor = Squeeze(squeeze_dims=1)(tf.constant(value1))
result = out_tensor.eval()
assert result.shape == (2,)
def test_convert_to_tensor(self):
"""Test implicit conversion of Layers to Tensors."""
v = Variable(np.array([1.5]))
v.create_tensor()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
result = sess.run(v)
assert result == 1.5
result = sess.run(tf.gradients(v, v))
assert result[0] == 1.0
def test_alpha_share_layer(self):
"""Test that alpha share works correctly"""
batch_size = 50
length = 10
test_1 = np.random.rand(batch_size, length)
test_2 = np.random.rand(batch_size, length)
with self.test_session() as sess:
test_1 = tf.convert_to_tensor(test_1, dtype=tf.float32)
test_2 = tf.convert_to_tensor(test_2, dtype=tf.float32)
out_tensor = AlphaShareLayer()(test_1, test_2)
sess.run(tf.global_variables_initializer())
test_1_out_tensor = out_tensor[0].eval()
test_2_out_tensor = out_tensor[1].eval()
assert test_1.shape == test_1_out_tensor.shape
assert test_2.shape == test_2_out_tensor.shape
def test_beta_share(self):
"""Test that beta share works correctly"""
batch_size = 50
length = 10
test_1 = np.random.rand(batch_size, length)
test_2 = np.random.rand(batch_size, length)
with self.test_session() as sess:
test_1 = tf.convert_to_tensor(test_1, dtype=tf.float32)
test_2 = tf.convert_to_tensor(test_2, dtype=tf.float32)
out_tensor = BetaShare()(test_1, test_2)
sess.run(tf.global_variables_initializer())
out_tensor.eval()
assert test_1.shape == out_tensor.shape
assert test_2.shape == out_tensor.shape
def test_sluice_loss(self):
"""Test the sluice loss function"""
input1 = np.ones((3, 4))
input2 = np.ones((2, 2))
with self.test_session() as sess:
input1 = tf.convert_to_tensor(input1, dtype=tf.float32)
input2 = tf.convert_to_tensor(input2, dtype=tf.float32)
output_tensor = SluiceLoss()(input1, input2)
sess.run(tf.global_variables_initializer())
assert output_tensor.eval() == 40.0
def test_graphcnn(self):
""" Test GraphCNN Layer From https://arxiv.org/abs/1703.00792"""
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
with self.test_session() as sess:
out_tensor = GraphCNN(num_filters=6)(V, adjs)
sess.run(tf.global_variables_initializer())
result = out_tensor.eval()
assert result.shape == (10, 100, 6)
def test_graphcnnpool(self):
""" Test GraphCNNPool Layer From https://arxiv.org/abs/1703.00792"""
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
with self.test_session() as sess:
vertex_props, adjs = GraphEmbedPoolLayer(num_vertices=6)(V, adjs)
sess.run(tf.global_variables_initializer())
vertex_props, adjs = vertex_props.eval(), adjs.eval()
assert vertex_props.shape == (10, 6, 50)
assert adjs.shape == (10, 6, 5, 6)
def test_slice(self):
"""Test that Slice can be invoked."""
batch_size = 10
n_features = 5
test_tensor_input = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
test_tensor = tf.convert_to_tensor(test_tensor_input, dtype=tf.float32)
out_tensor = Slice(1)(test_tensor)
out_tensor = out_tensor.eval()
assert np.allclose(out_tensor, test_tensor_input[:, 1:2])
def test_IRV(self):
"""Test that IRVLayer and IRVRegularize can be invoked."""
batch_size = 10
n_tasks = 5
K = 10
n_features = 2 * K * n_tasks
test_tensor_input = np.random.rand(batch_size, n_features)
with self.test_session() as sess:
test_tensor = tf.convert_to_tensor(test_tensor_input, dtype=tf.float32)
irv_layer = IRVLayer(n_tasks, K)
irv_layer.create_tensor(in_layers=[test_tensor])
out_tensor = irv_layer.out_tensor
sess.run(tf.global_variables_initializer())
out_tensor = out_tensor.eval()
assert out_tensor.shape == (batch_size, n_tasks)
irv_reg = IRVRegularize(irv_layer, 1.)()
assert irv_reg.eval() >= 0
def test_hingeloss(self):
separation = 0.25
labels = [1, 1, 0, 0]
logits = [.3, .1, -0.3, -0.1]
losses = np.array([0, 0.15, 0, 0.15], dtype=np.float32)
with self.test_session() as sess:
logits_tensor = tf.convert_to_tensor(logits, dtype=tf.float32)
labels_tensor = tf.convert_to_tensor(labels, dtype=tf.float32)
out_tensor = HingeLoss(separation=separation)(labels_tensor,
logits_tensor)
out_tensor = out_tensor.eval()
retval = np.all(losses == np.array(out_tensor))
self.assertTrue(retval)
|
|
# VNWA Control from a Python Application
# Copyright 2013 Colin O'Flynn
#
# Released under MIT License:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import win32api, win32ui, win32gui, win32con
import subprocess
from multiprocessing import Process
from subprocess import Popen
DETACHED_PROCESS = 0x00000008
import sys
import time
class VNWAConnector(object):
def __init__(self):
message_map = {
win32con.WM_USER: self.wndProc
}
wc = win32gui.WNDCLASS()
wc.lpfnWndProc = message_map
wc.lpszClassName = 'VNWAListenerClass'
hinst = wc.hInstance = win32api.GetModuleHandle(None)
classAtom = win32gui.RegisterClass(wc)
self.hwnd = win32gui.CreateWindow (
classAtom,
"VNWA Crappy Listener",
0,
0,
0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0,
0,
hinst,
None
)
#print self.hwnd
self.waiting = True
def wndProc(self, hwnd, msg, wparam, lparam):
#print "%d: %d %d %d"%(hwnd, msg, wparam, lparam)
self.waiting = False
if (wparam & 0xffff0000) == 0:
self.VNWA_HWND = lparam
self.VNWA_MSG = wparam
print "Connected to VNWA Process (%d, %d)"%(self.VNWA_MSG, self.VNWA_HWND)
else:
ecode = wparam >> 16
if ecode == 1:
#print "Command OK"
pass
elif ecode == 2:
raise IOError("Script file error, or non-existent file")
elif ecode == 3:
raise IOError("File access error?")
elif ecode == 5:
raise IOError("VNWA Overload - check audio settings")
else:
raise ValueError("Unknown error = %d"%ecode)
def sendMessage(self, wparam, iparam, wait=True):
self.waiting = True
win32api.PostMessage(self.VNWA_HWND, self.VNWA_MSG, wparam, iparam)
if wait:
self.waitResponse()
def waitResponse(self, timeout=20):
for i in range(0,timeout*100):
win32gui.PumpWaitingMessages()
            if not self.waiting:
break
time.sleep(0.01)
def setRFile(self, rstring):
self.sendMessage(6, 0)
for r in rstring:
self.sendMessage(6, ord(r))
def setWFile(self, rstring):
self.sendMessage(7, 0)
for r in rstring:
self.sendMessage(7, ord(r))
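    # Filenames are transferred one character at a time: e.g. setRFile('cal')
    # posts (6, 0) first (presumably resetting the filename buffer), then
    # (6, 99), (6, 97) and (6, 108) for 'c', 'a' and 'l'.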
class VNWA(object):
def startVNWA(self, exeloc, debug=True):
self.vnaconn = VNWAConnector()
cmd = [
exeloc,
'-remote',
'-callback',
str(self.vnaconn.hwnd),
str(win32con.WM_USER)
]
if debug:
cmd.append('-debug')
        processVNWA = Popen(cmd, shell=False, stdin=None, stdout=None,
                            stderr=None, close_fds=True,
                            creationflags=DETACHED_PROCESS)
self.vnaconn.waitResponse()
def closeVNWA(self):
""" Terminate the VNWA Program """
self.vnaconn.sendMessage(0, 0)
def sweepOnce(self, S21=False, S11=False, S12=False, S22=False):
""" Do a Sweep """
swpmode = 0
if S21:
swpmode |= 1<<0
if S11:
swpmode |= 1<<1
if S12:
swpmode |= 1<<2
if S22:
swpmode |= 1<<3
self.vnaconn.sendMessage(1, swpmode)
def sweepContinous(self, S21=False, S11=False, S12=False, S22=False):
""" Do a Sweep """
swpmode = 0
if S21:
swpmode |= 1<<0
if S11:
swpmode |= 1<<1
if S12:
swpmode |= 1<<2
if S22:
swpmode |= 1<<3
self.vnaconn.sendMessage(18, swpmode)
def stopSweep(self, stopNow=False):
""" Stop sweep """
if stopNow:
par = 1
else:
par = 0
self.vnaconn.sendMessage(19, par)
def loadCal(self, filename):
""" Load cal file """
self.vnaconn.setRFile(filename)
self.vnaconn.sendMessage(2, 0)
def loadMasterCal(self, filename):
""" Load master cal file """
self.vnaconn.setRFile(filename)
self.vnaconn.sendMessage(3, 0)
def writeS2P(self, filename):
""" Write data to S2P File """
self.vnaconn.setWFile(filename)
self.vnaconn.sendMessage(4, 0)
def setStartFreq(self, freq):
""" Set sweet start frequency in Hz """
self.vnaconn.sendMessage(8, freq)
def setStopFreq(self, freq):
""" Set sweep stop frequency in Hz """
self.vnaconn.sendMessage(9, freq)
def setTXPowerLinear(self, power):
""" Set TX Power in range 0...16383 """
self.vnaconn.sendMessage(17, power)
def setTXPowerdBm(self, power):
""" Set TX Power in range -67 ... -17 dBm """
#Convert to linear
pw = 10 ** (power / 20.0)
#Scale by VNWA Constant
pw = pw * 115981.4
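        # Worked example: power = -20 dBm -> 10**(-20/20.0) = 0.1,
        # scaled by the VNWA constant -> 11598.14, rounded to 11598.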
#Make integer
pw = round(pw)
if pw < 0:
pw = 0
if pw > 16383:
pw = 16383
print pw
self.setTXPowerLinear(pw)
def setRFFreq(self, freq):
""" Set RF DDS Frequency in Hz """
self.vnaconn.sendMessage(14, freq)
def setLOFreq(self, freq):
""" Set LO DDS Frequency in Hz """
self.vnaconn.sendMessage(15, freq)
def setVNWAFreq(self, freq):
""" Set RF & LO DDS Frequency in Hz with IF offset """
self.vnaconn.sendMessage(16, freq)
def main():
vna = VNWA()
vna.startVNWA('C://E//Documents//VNA Stuff//vnwa//VNWA.exe')
vna.setStartFreq(10E6)
vna.setStopFreq(100E6)
    vna.loadCal(r'C:\ctest.cal')
vna.sweepOnce(S21=True)
vna.setVNWAFreq(1E6)
vna.setTXPowerdBm(-33)
if __name__ == "__main__":
main()
|
|
from JumpScale import j
base = j.tools.cuisine._getBaseClass()
class CuisinePNode(base):
def __init__(self, executor, cuisine):
self._executor = executor
self._cuisine = cuisine
self.defaultArch = ['amd64', 'i686']
@property
def hwplatform(self):
"""
example: hwplatform = rpi_2b, orangepi_plus, amd64
"""
_, arch, _ = self._cuisine.core.run('uname -m')
# generic detection
if arch == "x86_64":
return "amd64"
if arch == "i686":
return "x86"
# more precise detection
if arch == "armv7l":
if self._cuisine.core.dir_exists('/sys/class/bcm2708_vcio'):
return "rpi_2b"
if self._cuisine.core.file_exists('/dev/mmcblk1boot0'):
return 'orangepi_plus'
return None
"""
Disk stuff
"""
def _ensureDevName(self, device):
if not device.startswith("/dev"):
return "/dev/%s" % device
return device
def _getNeededPartitions(self):
needed = []
mounts = self._cuisine.core.file_read('/proc/mounts').splitlines()
for line in mounts:
# keep root partition
if " / " in line:
needed.append(line)
# keep boot partition
if " /boot " in line:
needed.append(line)
swaps = self._cuisine.core.file_read('/proc/swaps').splitlines()
for line in swaps:
# keep swap
if line.startswith('/'):
needed.append(line)
final = []
for item in needed:
final.append(item.replace('/dev/', '').partition(' ')[0])
return final
def _getDisks(self):
devices = self._cuisine.core.run('lsblk -n -l -o NAME,TYPE')[1].splitlines()
disks = []
for line in devices:
if "disk" in line:
disks.append(line.partition(' ')[0])
return disks
def _getDisksWithExclude(self, disks, exclude):
        # iterate over a copy since we may remove items from `disks` below
        for disk in list(disks):
for keep in exclude:
if disk not in keep:
continue
if disk in disks:
disks.remove(disk)
return disks
def _eraseDisk(self, disk):
disk = self._ensureDevName(disk)
self._cuisine.core.run("dd if=/dev/zero of=%s bs=4M count=1" % disk)
def _getPartitionsOnDisk(self, disk):
disk = self._ensureDevName(disk)
partitions = self._cuisine.core.run('ls %s*' % disk)[1].splitlines()
return partitions
def _unmountDisk(self, disk):
"""
Unmount all partitions in disk
"""
partitions = self._getPartitionsOnDisk(disk)
for partition in partitions:
self._cuisine.core.run('umount %s' % partition, die=False)
def erase(self, keepRoot=True):
"""
        if keepRoot == True:
            find boot/root/swap partitions and leave them untouched (if they
            are part of a mirror, leave the mirror members too)
        clean/remove all (other) disks/partitions
"""
if self.hwplatform != "amd64":
raise j.exceptions.Input("only amd64 hw platform supported")
# grab the list of all disks on the machine
disks = self._getDisks()
if keepRoot:
# grab list of partitions needed to keep the machine alive
keeps = self._getNeededPartitions()
disks = self._getDisksWithExclude(disks, keeps)
# erasing all disks not needed
for disk in disks:
self._unmountDisk(disk)
self._eraseDisk(disk)
# commit changes to the kernel
self._cuisine.core.run("partprobe")
def importRoot(self, source="/image.tar.gz", destination="/"):
"""
Import and extract an archive to the filesystem
"""
cmd = 'tar -zpxf %s -C %s' % (source, destination)
self._cuisine.core.run(cmd)
def exportRoot(self, source="/", destination="/image.tar.gz", excludes=["\.pyc", "__pycache__"]):
"""
Create an archive of a remote file system
@param excludes is list of regex matches not to include while doing export
"""
        excludes_string = " ".join(["--exclude='%s'" % x for x in excludes])
        # excludes_string already carries its own --exclude flags, so don't
        # nest it behind another --exclude=
        cmd = 'tar -zpcf %s %s --one-file-system %s' % (destination, excludes_string, source)
self._cuisine.core.run(cmd)
def exportRootStor(self, storspace, plistname, source="/", excludes=["\.pyc", "__pycache__"], removetmpdir=True):
"""
reason to do this is that we will use this to then make the step to g8os with g8osfs (will be very small step then)
"""
storspace.upload(plistname, source=source, excludes=excludes, removetmpdir=removetmpdir)
def formatStorage(self, keepRoot=True, mountpoint="/storage"):
"""
use btrfs to format/mount the disks
        use metadata & data in raid1 (if at least 2 disks)
        make sure they are in fstab so the mount survives a reboot
"""
if self.hwplatform != "amd64":
raise j.exceptions.Input("only amd64 hw platform supported")
# grab the list of all disks on the machine
disks = self._getDisks()
if keepRoot:
# grab list of partitions needed to keep the machine alive
keeps = self._getNeededPartitions()
disks = self._getDisksWithExclude(disks, keeps)
for disk in disks:
if len(self._getPartitionsOnDisk(disk)) > 0:
                raise j.exceptions.RuntimeError("Disk %s seems not empty, is the system clean?" % disk)
setup = []
for disk in disks:
setup.append(self._ensureDevName(disk))
        if setup:
disklist = ' '.join(setup)
self._cuisine.core.run('mkfs.btrfs -d raid1 %s' % disklist)
self._cuisine.core.dir_ensure(mountpoint)
self._cuisine.core.run('mount %s %s' % (setup[0], mountpoint))
else:
# check if no mounted btrfs partition yet and create if required
self._cuisine.btrfs.subvolumeCreate(mountpoint)
def buildG8OSImage(self):
"""
"""
# TODO: cuisine enable https://github.com/g8os/builder
def buildArchImage(self):
"""
"""
def installArch(self, rootsize=5):
"""
install arch on $rootsize GB root partition
"""
if self.hwplatform != "amd64":
raise j.exceptions.Input("only amd64 hw platform supported")
# manual partitioning
# get tgz from url="https://stor.jumpscale.org/public/ubuntu....tgz"
def installG8OS(self, rootsize=5):
"""
install g8os on $rootsize GB root partition
"""
if self.hwplatform != "amd64":
raise j.exceptions.Input("only amd64 hw platform supported")
# manual partitioning
# get tgz from url="https://stor.jumpscale.org/public/ubuntu....tgz"
|
|
"""
STARBURST ACC/FEANTA GeoBrick Worker
Author: Lokbondo Kung
Email: lkkung@caltech.edu
"""
import i_worker
import socket
import struct
# Description of the GeoBrick device. Currently hard-coded.
BRICK_HOSTNAME = 'geobrickanta.solar.pvt'
BRICK_PORT = 1025
BRICK_TIMEOUT = 0.5
# Program spaces that can be used in the GeoBrick.
COMMAND_REGIS = 'P1000='
ARG1_REGIS = ' P1001='
ARG2_REGIS = ' P1002='
# Brick command dictionary.
COMMAND_DICT = {'Home': 1,
'SelRx': 2,
'SetAngle': 3,
'SetZOffset': 4,
'SetXOffset': 5,
'Kill': 6,
'Enable': 7,
'SetX': 8,
'SetZ': 9}
# Dictionaries for ethernet packets to the Brick.
RQ_TYPE = {'upload': '\xc0',
'download': '\x40'}
RQ = {'sendline': '\xb0',
'getline': '\xb1',
'flush': '\xb3',
'getmem': '\xb4',
'setmem': '\xb5',
'setbit': '\xba',
'setbits': '\xbb',
'port': '\xbe',
'getresponse': '\xbf',
'readready': '\xc2',
'response': '\xc4',
'getbuffer': '\xc5',
'writebuffer': '\xc6',
'writeerror': '\xc7',
'fwdownload': '\xcb',
'ipaddress': '\xe0'}
COORDINATE = {1: 'Z',
3: 'A',
4: 'X'}
AXIS_SCALING = {1: 42.5636 * 96 * 32,
3: 23181.5208 * 96 * 32,
4: 3973.477 * 96 * 32}
MPADDRESSSTART = 900
class BrickWorker(i_worker.IWorker):
def __init__(self):
super(BrickWorker, self).__init__()
self.commands = ['FRM-HOME',
'FRM-KILL',
'FRM-RX-SEL',
'FRM-SET-PA',
'FRM-X-OFFSET',
'FRM-Z-OFFSET',
'FRM-ABS-X',
'FRM-ABS-Z',
'FRM-ENABLE']
self.brick_socket = None
self.brick_ip = socket.gethostbyname(BRICK_HOSTNAME)
self.name = 'GeoBrick-Worker'
# ---------------------------------------------------------------
# COMMAND PACKAGING ROUTINES SPECIFIC TO GEOBRICK
# ---------------------------------------------------------------
#region Method Description
"""
Method: __make_brick_command
Description:
Takes a command to the Brick and packages it into an
ethernet packet recognized by the Brick system.
Arguments:
rq_type: type of request, either 'upload' or 'download'.
rq: nature of request, lookup dictionary defined in RQ.
val: value associated with the request.
index: index associated with the request.
command_packets: list of strings to be packed into TCP packets.
"""
#endregion
def __make_brick_command(self, rq_type, rq, val, index, command_packets):
packets = []
for packet in command_packets:
buf = RQ_TYPE[rq_type] + RQ[rq]
buf += struct.pack('H', val)
buf += struct.pack('H', index)
buf += struct.pack('H', socket.htons(len(packet) + 1))
buf += struct.pack(str(len(packet)) + 's', packet)
buf += struct.pack("B", 0)
packets.append(buf)
return packets
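    # Hedged sketch of one resulting packet for the command 'P1000=1',
    # assuming a little-endian host (which is why the htons() above ends up
    # storing the length big-endian, as the Brick protocol appears to
    # expect):
    #   \x40\xbf           request type ('download') + request ('getresponse')
    #   \x00\x00\x00\x00   val and index, both 0 for command strings
    #   \x00\x08           length = len('P1000=1') + 1
    #   P1000=1\x00        NUL-terminated command string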
# ---------------------------------------------------------------
# COMMAND ROUTINES
# ---------------------------------------------------------------
#region Method Description
"""
Method: __frm_home
Description:
Runs homing procedure local to the GeoBrick. Do NOT use this
method on its own. This method is error checked before execution.
Arguments:
acc_command: list of the strings sent from the ACC. List format:
['FRM-HOME']
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_home(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-HOME.')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Home'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_rx_sel
Description:
Routine to select one of two receivers on the antenna
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-RX-SEL', rx] where rx is 1 for low-nu and 2 for high-nu.
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_rx_sel(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-RX-SEL.')
return None
rx = None
try:
rx = int(acc_command[1])
if rx not in [1, 2]:
raise ValueError('Invalid RX selection.')
except ValueError:
self.logger('Invalid call to FRM-RX-SEL.')
return None
# Build command based on parameters.
command = COMMAND_REGIS + str(COMMAND_DICT['SelRx']) + \
ARG1_REGIS + str(rx)
command_packets = [command]
return command_packets, self.__make_brick_command('download',
'getresponse',
0, 0,
command_packets)
#region Method Description
"""
Method: __frm_set_pa
Description:
Routine to move motor 3 to a given angle. This routine should only
be called after a FRM_HOME command has been issued. Do NOT use
this method on its own.
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-SET-PA', angle] where angle is the absolute angle to be
set.
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_set_pa(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-SET-PA.')
return None
angle = None
try:
angle = int(acc_command[1])
if angle > 90 or angle < -90:
raise ValueError('Invalid position angle selection.')
except ValueError:
self.logger('Invalid call to FRM-SET-PA.')
return None
# Build command based on parameters.
command = COMMAND_REGIS + str(COMMAND_DICT['SetAngle']) + \
ARG1_REGIS + str(angle)
command_packets = [command]
return command_packets, self.__make_brick_command('download',
'getresponse',
0, 0,
command_packets)
def __frm_x_offset(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-X-OFFSET.')
return None
offset = None
try:
offset = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-X-OFFSET.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetXOffset']) + \
ARG1_REGIS + str(offset)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_z_offset(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-Z-OFFSET.')
return None
offset = None
try:
offset = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-Z-OFFSET.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetZOffset']) + \
ARG1_REGIS + str(offset)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_abs_x
Description:
Routine to move x-axis to specified location
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-ABS-X', destination] where destination is the destination
in physical units (mm).
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_abs_x(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-ABS-X.')
return None
position = None
try:
position = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-ABS-X.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetX']) + \
ARG1_REGIS + str(position)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_abs_z
Description:
Routine to move z-axis to specified location
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-ABS-Z', destination] where destination is the destination
in physical units (mm).
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_abs_z(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-ABS-Z.')
return None
position = None
try:
position = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-ABS-Z.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetZ']) + \
ARG1_REGIS + str(position)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_kill(self, acc_command):
# Error check that the command given was formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-KILL')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Kill'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_enable(self, acc_command):
# Error check that the command given was formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-ENABLE')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Enable'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
# ---------------------------------------------------------------
# FUNCTION MAP
# ---------------------------------------------------------------
function_map = {'FRM-HOME': __frm_home,
'FRM-KILL': __frm_kill,
'FRM-RX-SEL': __frm_rx_sel,
'FRM-SET-PA': __frm_set_pa,
'FRM-X-OFFSET': __frm_x_offset,
'FRM-Z-OFFSET': __frm_z_offset,
'FRM-ABS-X': __frm_abs_x,
'FRM-ABS-Z': __frm_abs_z,
'FRM-ENABLE': __frm_enable}
# ---------------------------------------------------------------
# STATEFRAME HELPERS
# ---------------------------------------------------------------
def __brickmonitor_query(self):
command = 'LIST GATHER'
query_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
query_socket.settimeout(BRICK_TIMEOUT)
query_socket.connect((self.brick_ip, BRICK_PORT))
cmd_string = [command]
cmd = self.__make_brick_command('download', 'getresponse',
0, 0, cmd_string)
query_socket.sendall(cmd[0])
response = query_socket.recv(1024)
query_socket.close()
response = response.replace('\r', ' ')
response = response.split(' ')
parsed_response = []
for monitor_point in response:
parsed_response.append(self.__str2float(monitor_point))
return parsed_response
    def __str2float(self, str_val):
        # Convert one hex word from the Brick's 'LIST GATHER' response into
        # a float: the low 12 bits hold a biased exponent and the remaining
        # high bits the mantissa, i.e. mantissa * 2 ** (exponent - 2082).
        # (The bias 2082 appears to be the Delta Tau gather format; treat
        # that attribution as an assumption.)
        num = 0
        try:
            num = int(str_val, 16)
        except Exception:
            num = 0
        return (num >> 12) * 2 ** ((num & 0xFFF) - 2082)
# ---------------------------------------------------------------
# INTERFACE IMPLEMENTATIONS
# ---------------------------------------------------------------
# region Method Description
"""
Method: get_command_list
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def get_command_list(self):
return self.commands
# region Method Description
"""
Method: execute
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def execute(self, acc_command):
# Use the routine functions to get the commands to push.
packets = self.function_map[acc_command[0]](
self, acc_command)
if packets is not None:
self.logger('Issued the following commands to brick:')
for packet in packets[0]:
self.logger(repr(packet))
# Try pushing message across TCP.
# Wait for reply of at most 1024 bytes.
try:
for packet in packets[1]:
reply = None
self.brick_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.brick_socket.connect((self.brick_ip, BRICK_PORT))
self.brick_socket.sendall(packet)
self.brick_socket.settimeout(BRICK_TIMEOUT)
reply = self.brick_socket.recv(1024)
self.logger('Reply from brick: ' + reply)
self.brick_socket.close()
self.brick_socket = None
except socket.gaierror:
self.logger('Brick hostname could not be resolved.')
except socket.error:
self.logger('Unable to send packet to brick.')
# region Method Description
"""
Method: stateframe_query
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def stateframe_query(self):
stateframe_data = {'AXIS1': {},
'AXIS3': {},
'AXIS4': {}}
fetched_data = self.__brickmonitor_query()
stateframe_data['HOMED'] = \
int(fetched_data[1])
stateframe_data['RXSEL'] = \
int(fetched_data[2])
stateframe_data['AXIS1']['P'] = \
float(fetched_data[3])
stateframe_data['AXIS1']['PERR'] = \
float(fetched_data[4])
stateframe_data['AXIS1']['POFF'] = \
float(fetched_data[5])
stateframe_data['AXIS1']['I'] = \
float(fetched_data[6])
stateframe_data['AXIS1']['POSLIMIT'] = \
int(fetched_data[7])
stateframe_data['AXIS1']['NEGLIMIT'] = \
int(fetched_data[8])
stateframe_data['AXIS1']['AMPFAULT'] = \
int(fetched_data[9])
stateframe_data['AXIS3']['P'] = \
float(fetched_data[10])
stateframe_data['AXIS3']['PERR'] = \
float(fetched_data[11])
stateframe_data['AXIS3']['POFF'] = \
float(fetched_data[12])
stateframe_data['AXIS3']['I'] = \
float(fetched_data[13])
stateframe_data['AXIS3']['POSLIMIT'] = \
int(fetched_data[14])
stateframe_data['AXIS3']['NEGLIMIT'] = \
int(fetched_data[15])
stateframe_data['AXIS3']['AMPFAULT'] = \
int(fetched_data[16])
stateframe_data['AXIS4']['P'] = \
float(fetched_data[17])
stateframe_data['AXIS4']['PERR'] = \
float(fetched_data[18])
stateframe_data['AXIS4']['POFF'] = \
float(fetched_data[19])
stateframe_data['AXIS4']['I'] = \
float(fetched_data[20])
stateframe_data['AXIS4']['POSLIMIT'] = \
int(fetched_data[21])
stateframe_data['AXIS4']['NEGLIMIT'] = \
int(fetched_data[22])
stateframe_data['AXIS4']['AMPFAULT'] = \
int(fetched_data[23])
return stateframe_data
|
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals)
"""
A Python Singleton mixin class that makes use of some of the ideas
found at http://c2.com/cgi/wiki?PythonSingleton. Just inherit
from it and you have a singleton. No code is required in
subclasses to create singleton behavior -- inheritance from
Singleton is all that is needed.
Singleton creation is threadsafe.
USAGE:
Just inherit from Singleton. If you need a constructor, include
an __init__() method in your class as you usually would. However,
if your class is S, you instantiate the singleton using S.get_instance()
instead of S(). Repeated calls to S.get_instance() return the
originally-created instance.
For example:
class S(Singleton):
def __init__(self, a, b=1):
pass
S1 = S.get_instance(1, b=3)
Most of the time, that's all you need to know. However, there are some
other useful behaviors. Read on for a full description:
1) Getting the singleton:
S.get_instance()
returns the instance of S. If none exists, it is created.
2) Use S.__init__() for instantiation processing,
since S.get_instance() runs S.__init__(), passing it the args it has received.
If no data needs to be passed in at instantiation time, you don't need S.__init__().
3) If S.__init__() requires parameters, include them ONLY in the
first call to S.get_instance(). If subsequent calls have arguments,
a SingletonException is raised by default.
If you find it more convenient for subsequent calls to be allowed to
have arguments, but for those arguments to be ignored, just include
"ignore_subsequent = True" in your class definition, i.e.:
class S(Singleton):
ignore_subsequent = True
def __init__(self, a, b=1):
pass
4) For testing, it is sometimes convenient for all existing singleton
instances to be forgotten, so that new instantiations can occur. For that
reason, a forget_all_singletons() function is included. Just call
forget_all_singletons()
and it is as if no earlier instantiations have occurred.
5) As an implementation detail, classes that inherit
from Singleton may not have their own __new__
methods. To make sure this requirement is followed,
an exception is raised if a Singleton subclass includes
__new__. This happens at subclass definition
time (by means of the MetaSingleton metaclass).
By Gary Robinson, grobinson@flyfi.com. No rights reserved --
placed in the public domain -- which is only reasonable considering
how much it owes to other people's code and ideas which are in the
public domain. The idea of using a metaclass came from
a comment on Gary's blog (see
http://www.garyrobinson.net/2004/03/python_singleto.html#comments).
Other improvements came from comments and email from other
people who saw it online. (See the blog post and comments
for further credits.)
Not guaranteed to be fit for any particular purpose. Use at your
own risk.
"""
import threading
class SingletonException(Exception):
pass
_st_singletons = set()
_lock_for_singletons = threading.RLock()
# Ensure only one instance of each Singleton class is created. This is not
# bound to the individual Singleton class since we need to ensure that there
# is only one mutex for each Singleton class, which would require having a
# lock when setting up the Singleton class, which is what this is anyway.
# So, when any Singleton is created, we lock this lock and then we don't
# need to lock it again for that class.
_lock_for_singleton_creation = threading.RLock()
def _create_singleton_instance(cls, args, kw_args):
_lock_for_singleton_creation.acquire()
try:
if cls._is_instantiated(): # some other thread got here first
return
instance = cls.__new__(cls)
try:
instance.__init__(*args, **kw_args)
        except TypeError as e:
            # str(e) works on Python 2 and 3 alike; e.message is Python 2 only.
            if "__init__() takes" in str(e):
                raise SingletonException("If the singleton requires __init__ "
                                         "args, supply them only on the first "
                                         "call to get_instance().")
else:
raise
cls.c_instance = instance
_add_singleton(cls)
finally:
_lock_for_singleton_creation.release()
def _add_singleton(cls):
_lock_for_singletons.acquire()
try:
assert cls not in _st_singletons
_st_singletons.add(cls)
finally:
_lock_for_singletons.release()
def _remove_singleton(cls):
_lock_for_singletons.acquire()
try:
if cls in _st_singletons:
_st_singletons.remove(cls)
finally:
_lock_for_singletons.release()
def forget_all_singletons():
"""This is useful in tests, since it is hard to know which singletons need
to be cleared to make a test work."""
_lock_for_singletons.acquire()
try:
for cls in _st_singletons.copy():
cls._forget_class_instance_reference_for_testing()
# Might have created some Singletons in the process of tearing down.
# Try one more time - there should be a limit to this.
i_num_singletons = len(_st_singletons)
if len(_st_singletons) > 0:
for cls in _st_singletons.copy():
cls._forget_class_instance_reference_for_testing()
i_num_singletons -= 1
assert i_num_singletons == len(_st_singletons),\
"Added a singleton while destroying " + str(cls)
assert len(_st_singletons) == 0, _st_singletons
finally:
_lock_for_singletons.release()
class MetaSingleton(type):
def __new__(metaclass, name, bases, dct):
if "__new__" in dct:
raise SingletonException("Can not override __new__ in a Singleton")
return super(MetaSingleton, metaclass).__new__(metaclass, name, bases, dct)
def __call__(cls, *args, **kw_args):
raise SingletonException("Singletons may only be instantiated through"\
" get_instance()")
class Singleton(object):
__metaclass__ = MetaSingleton
def get_instance(cls, *args, **kw_args):
"""
Call this to instantiate an instance or retrieve the existing instance.
If the singleton requires args to be instantiated, include them the first
time you call get_instance.
"""
if cls._is_instantiated():
if (args or kw_args) and not hasattr(cls, "ignore_subsequent"):
raise SingletonException("Singleton already instantiated, but"\
" get_instance() called with args.")
else:
_create_singleton_instance(cls, args, kw_args)
return cls.c_instance
get_instance = classmethod(get_instance)
def _is_instantiated(cls):
        # Don't use hasattr(cls, "c_instance"), because that screws things up if there is a singleton that
        # extends another singleton. hasattr looks in the base class if it doesn't find it in the subclass.
return "c_instance" in cls.__dict__
_is_instantiated = classmethod(_is_instantiated)
# This can be handy for public use also
is_instantiated = _is_instantiated
def _forget_class_instance_reference_for_testing(cls):
"""
This is designed for convenience in testing -- sometimes you
want to get rid of a singleton during test code to see what
happens when you call get_instance() under a new situation.
To really delete the object, all external references to it
also need to be deleted.
"""
try:
if hasattr(cls.c_instance, "_prepare_to_forget_singleton"):
# tell instance to release anything it might be holding onto.
cls.c_instance._prepare_to_forget_singleton()
del cls.c_instance
_remove_singleton(cls)
except AttributeError:
# run up the chain of base classes until we find the one that has the instance
# and then delete it there
for base_cls in cls.__bases__:
if issubclass(base_cls, Singleton):
base_cls._forget_class_instance_reference_for_testing()
_forget_class_instance_reference_for_testing = classmethod(_forget_class_instance_reference_for_testing)
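# A minimal, hedged usage sketch (the Settings class and its path argument
# are illustrative, not part of this module): repeated get_instance() calls
# hand back the same object, while direct construction is rejected by
# MetaSingleton.__call__.
#
#     class Settings(Singleton):
#         def __init__(self, path):
#             self.path = path
#
#     a = Settings.get_instance('/etc/app.conf')
#     b = Settings.get_instance()
#     assert a is b
#     Settings('/etc/app.conf')   # raises SingletonException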
|
|
import base64
import calendar
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if isinstance(value, (str, bytes)):
query_val = value
else:
try:
iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, even when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = [
item if isinstance(item, bytes) else str(item)
for item in value
]
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
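# Hedged example with a MultiValueDict (doseq=True expands each list of
# values into repeated keys):
#   urlencode(MultiValueDict({'a': ['1', '2']}), doseq=True) == 'a=1&a=2'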
def cookie_date(epoch_seconds=None):
"""
Format the time to ensure compatibility with Netscape's cookie standard.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
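# Hedged doctest-style examples for the two helpers above (values worked
# out by hand rather than taken from a test suite):
#   int_to_base36(1234567890) == 'kf12oi'
#   base36_to_int('kf12oi') == 1234567890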
def urlsafe_base64_encode(s):
"""
Encode a bytestring in base64 for use in URLs. Strip any trailing equal
signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
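# Hedged example of the round trip (the encoder returns bytes; the
# decoder's ljust() call restores the stripped '=' padding):
#   urlsafe_base64_encode(b'42') == b'NDI'
#   urlsafe_base64_decode('NDI') == b'42'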
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
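# Hedged example (ETag strings illustrative):
#   parse_etags('W/"abc", "def"') == ['W/"abc"', '"def"']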
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
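# Hedged examples of the matching rules (hostnames illustrative):
#   is_same_domain('foo.example.com', '.example.com')  -> True
#   is_same_domain('example.com', '.example.com')      -> True
#   is_same_domain('badexample.com', '.example.com')   -> False
#   is_same_domain('example.com', 'example.com')       -> True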
def is_safe_url(url, allowed_hosts=None, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
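# A hedged sketch of the fields_limit behaviour (values illustrative):
#   limited_parse_qsl('a=1&b=2', fields_limit=2)
#       -> [('a', '1'), ('b', '2')]
#   limited_parse_qsl('a=1&b=2&c=3', fields_limit=2)
#       -> raises TooManyFieldsSent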
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Based on Jimmy Tang's implementation
DOCUMENTATION = '''
---
module: keystone_user
version_added: "1.2"
short_description: Manage OpenStack Identity (keystone) users, tenants and roles
description:
    - Manage users, tenants and roles from OpenStack.
options:
login_user:
description:
- login username to authenticate to keystone
required: false
default: admin
login_password:
description:
- Password of login user
required: false
default: 'yes'
login_tenant_name:
description:
- The tenant login_user belongs to
required: false
default: None
version_added: "1.3"
token:
description:
      - The token to be used in case the password is not specified
required: false
default: None
endpoint:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
user:
description:
      - The name of the user that has to be added/removed from OpenStack
required: false
default: None
password:
description:
- The password to be assigned to the user
required: false
default: None
tenant:
description:
      - The tenant name that has to be added/removed
required: false
default: None
tenant_description:
description:
- A description for the tenant
required: false
default: None
email:
description:
- An email address for the user
required: false
default: None
role:
description:
- The name of the role to be assigned or created
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
requirements: [ python-keystoneclient ]
author: Lorin Hochstein
'''
EXAMPLES = '''
# Create a tenant
- keystone_user: tenant=demo tenant_description="Default Tenant"
# Create a user
- keystone_user: user=john tenant=demo password=secrete
# Apply the admin role to the john user in the demo tenant
- keystone_user: role=admin user=john tenant=demo
'''
try:
from keystoneclient.v2_0 import client
except ImportError:
keystoneclient_found = False
else:
keystoneclient_found = True
def authenticate(endpoint, token, login_user, login_password, login_tenant_name):
"""Return a keystone client object"""
if token:
return client.Client(endpoint=endpoint, token=token)
else:
return client.Client(auth_url=endpoint, username=login_user,
password=login_password, tenant_name=login_tenant_name)
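# Hedged usage sketch (endpoint and credentials are illustrative):
#   keystone = authenticate('http://127.0.0.1:35357/v2.0', 'ADMIN_TOKEN',
#                           None, None, None)           # token auth
#   keystone = authenticate('http://127.0.0.1:35357/v2.0', None,
#                           'admin', 's3cret', 'admin')  # password auth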
def tenant_exists(keystone, tenant):
""" Return True if tenant already exists"""
return tenant in [x.name for x in keystone.tenants.list()]
def user_exists(keystone, user):
"""" Return True if user already exists"""
return user in [x.name for x in keystone.users.list()]
def get_tenant(keystone, name):
""" Retrieve a tenant by name"""
tenants = [x for x in keystone.tenants.list() if x.name == name]
count = len(tenants)
if count == 0:
raise KeyError("No keystone tenants with name %s" % name)
elif count > 1:
raise ValueError("%d tenants with name %s" % (count, name))
else:
return tenants[0]
def get_user(keystone, name):
""" Retrieve a user by name"""
users = [x for x in keystone.users.list() if x.name == name]
count = len(users)
if count == 0:
raise KeyError("No keystone users with name %s" % name)
elif count > 1:
raise ValueError("%d users with name %s" % (count, name))
else:
return users[0]
def get_role(keystone, name):
""" Retrieve a role by name"""
roles = [x for x in keystone.roles.list() if x.name == name]
count = len(roles)
if count == 0:
raise KeyError("No keystone roles with name %s" % name)
elif count > 1:
raise ValueError("%d roles with name %s" % (count, name))
else:
return roles[0]
def get_tenant_id(keystone, name):
return get_tenant(keystone, name).id
def get_user_id(keystone, name):
return get_user(keystone, name).id
def ensure_tenant_exists(keystone, tenant_name, tenant_description,
check_mode):
""" Ensure that a tenant exists.
Return (True, id) if a new tenant was created, (False, None) if it
already existed.
"""
# Check if tenant already exists
try:
tenant = get_tenant(keystone, tenant_name)
except KeyError:
# Tenant doesn't exist yet
pass
else:
if tenant.description == tenant_description:
return (False, tenant.id)
else:
# We need to update the tenant description
if check_mode:
return (True, tenant.id)
else:
tenant.update(description=tenant_description)
return (True, tenant.id)
# We now know we will have to create a new tenant
if check_mode:
return (True, None)
ks_tenant = keystone.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=True)
return (True, ks_tenant.id)
def ensure_tenant_absent(keystone, tenant, check_mode):
""" Ensure that a tenant does not exist
Return True if the tenant was removed, False if it didn't exist
in the first place
"""
if not tenant_exists(keystone, tenant):
return False
# We now know we will have to delete the tenant
if check_mode:
        return True
    # Deleting the tenant is not covered by the original module body; this
    # uses the python-keystoneclient v2.0 tenant manager (a hedged
    # completion, not taken from the source).
    keystone.tenants.delete(get_tenant(keystone, tenant).id)
    return True
def ensure_user_exists_and_is_current(keystone, endpoint, user_name, password, email, tenant_name,
check_mode):
""" Check if user exists and has the same email and password
Return (True, id) if a new user was created or one was updated, (False, id) if the user is
up to date
"""
    # Check if user already exists
try:
user = get_user(keystone, user_name)
    except KeyError:
        # User doesn't exist yet
        user = None
else:
# User does exist, check if it's current
try:
authenticate(endpoint, None, user_name, password, tenant_name)
        except Exception:
            # Authentication failed, so the stored credentials are stale
            # and will be updated below.
            pass
else:
# It's current, we're done
return (False, user.id)
# We now know we will have to create a new user
if check_mode:
return (True, None)
tenant = get_tenant(keystone, tenant_name)
if (not user):
user = keystone.users.create(name=user_name, password=password,
email=email, tenant_id=tenant.id)
else:
user = keystone.users.update_password(user.id, password)
return (True, user.id)
def ensure_role_exists(keystone, user_name, tenant_name, role_name,
check_mode):
""" Check if role exists
Return (True, id) if a new role was created or if the role was newly
assigned to the user for the tenant. (False, id) if the role already
    exists and was already assigned to the user for the tenant.
"""
# Check if the user has the role in the tenant
user = get_user(keystone, user_name)
tenant = get_tenant(keystone, tenant_name)
roles = [x for x in keystone.roles.roles_for_user(user, tenant)
if x.name == role_name]
count = len(roles)
if count == 1:
# If the role is in there, we are done
role = roles[0]
return (False, role.id)
elif count > 1:
# Too many roles with the same name, throw an error
raise ValueError("%d roles with name %s" % (count, role_name))
# At this point, we know we will need to make changes
if check_mode:
return (True, None)
# Get the role if it exists
try:
role = get_role(keystone, role_name)
except KeyError:
# Role doesn't exist yet
role = keystone.roles.create(role_name)
    # Associate the role with the user for the given tenant
keystone.roles.add_user_role(user, role, tenant)
return (True, role.id)
def ensure_user_absent(keystone, user, check_mode):
raise NotImplementedError("Not yet implemented")
def ensure_role_absent(keystone, user, tenant, role, check_mode):
raise NotImplementedError("Not yet implemented")
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
tenant_description=dict(required=False),
email=dict(required=False),
user=dict(required=False),
tenant=dict(required=False),
password=dict(required=False),
role=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=False,
default="http://127.0.0.1:35357/v2.0"),
token=dict(required=False),
login_user=dict(required=False),
login_password=dict(required=False),
login_tenant_name=dict(required=False)
))
# keystone operations themselves take an endpoint, not a keystone auth_url
del(argument_spec['auth_url'])
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['token', 'login_user'],
['token', 'login_password'],
['token', 'login_tenant_name']]
)
if not keystoneclient_found:
module.fail_json(msg="the python-keystoneclient module is required")
user = module.params['user']
password = module.params['password']
tenant = module.params['tenant']
tenant_description = module.params['tenant_description']
email = module.params['email']
role = module.params['role']
state = module.params['state']
endpoint = module.params['endpoint']
token = module.params['token']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_tenant_name = module.params['login_tenant_name']
keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name)
check_mode = module.check_mode
try:
d = dispatch(keystone, user, password, tenant, tenant_description,
email, role, state, endpoint, token, login_user,
login_password, check_mode)
    except Exception as e:
if check_mode:
# If we have a failure in check mode
module.exit_json(changed=True,
msg="exception: %s" % e)
else:
module.fail_json(msg="exception: %s" % e)
else:
module.exit_json(**d)
def dispatch(keystone, user=None, password=None, tenant=None,
tenant_description=None, email=None, role=None,
state="present", endpoint=None, token=None, login_user=None,
login_password=None, check_mode=False):
""" Dispatch to the appropriate method.
Returns a dict that will be passed to exit_json
tenant user role state
------ ---- ---- --------
X present ensure_tenant_exists
X absent ensure_tenant_absent
X X present ensure_user_exists
X X absent ensure_user_absent
X X X present ensure_role_exists
X X X absent ensure_role_absent
"""
changed = False
id = None
if tenant and not user and not role and state == "present":
changed, id = ensure_tenant_exists(keystone, tenant,
tenant_description, check_mode)
elif tenant and not user and not role and state == "absent":
changed = ensure_tenant_absent(keystone, tenant, check_mode)
elif tenant and user and not role and state == "present":
changed, id = ensure_user_exists_and_is_current(keystone, endpoint, user, password,
email, tenant, check_mode)
elif tenant and user and not role and state == "absent":
changed = ensure_user_absent(keystone, user, check_mode)
elif tenant and user and role and state == "present":
changed, id = ensure_role_exists(keystone, user, tenant, role,
check_mode)
elif tenant and user and role and state == "absent":
changed = ensure_role_absent(keystone, user, tenant, role, check_mode)
else:
# Should never reach here
raise ValueError("Code should never reach here")
return dict(changed=changed, id=id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import netaddr
import os
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import utils
LOG = logging.getLogger(__name__)
linux_net_opts = [
cfg.StrOpt('dhcpbridge_flagfile',
default='/etc/nova/nova-dhcpbridge.conf',
help='location of flagfile for dhcpbridge'),
cfg.StrOpt('networks_path',
default='$state_path/networks',
help='Location to keep network config files'),
cfg.StrOpt('public_interface',
default='eth0',
help='Interface for public IP addresses'),
cfg.StrOpt('network_device_mtu',
default=None,
help='MTU setting for vlan'),
cfg.StrOpt('dhcpbridge',
default='$bindir/nova-dhcpbridge',
help='location of nova-dhcpbridge'),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
help='Public IP of network host'),
cfg.IntOpt('dhcp_lease_time',
default=120,
help='Lifetime of a DHCP lease in seconds'),
cfg.StrOpt('dns_server',
default=None,
help='if set, uses specific dns server for dnsmasq'),
cfg.ListOpt('dmz_cidr',
default=[],
help='A list of dmz range that should be accepted'),
cfg.StrOpt('dnsmasq_config_file',
default='',
help='Override the default dnsmasq settings with this file'),
cfg.StrOpt('linuxnet_interface_driver',
default='nova.network.linux_net.LinuxBridgeInterfaceDriver',
help='Driver used to create ethernet devices.'),
cfg.StrOpt('linuxnet_ovs_integration_bridge',
default='br-int',
help='Name of Open vSwitch bridge used with linuxnet'),
cfg.BoolOpt('send_arp_for_ha',
default=False,
help='send gratuitous ARPs for HA setup'),
cfg.BoolOpt('use_single_default_gateway',
default=False,
help='Use single default gateway. Only first nic of vm will '
'get default gateway from dhcp server'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(linux_net_opts)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
binary_name = os.path.basename(inspect.stack()[-1][1])[:16]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
self.wrap = wrap
self.top = top
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (binary_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self):
self.rules = []
self.chains = set()
self.unwrapped_chains = set()
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
        This removal "cascades". All rules in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
if wrap:
chain_set = self.chains
else:
chain_set = self.unwrapped_chains
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
self.rules = filter(lambda r: r.chain != name, self.rules)
if wrap:
jump_snippet = '-j %s-%s' % (binary_name, name)
else:
jump_snippet = '-j %s' % (name,)
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
if wrap and chain not in self.chains:
raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
self.rules.append(IptablesRule(chain, rule, wrap, top))
def _wrap_target_chain(self, s):
if s.startswith('$'):
return '%s-%s' % (binary_name, s[1:])
return s
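    # Hedged example of the '$' expansion above, assuming the running binary
    # is nova-compute (so binary_name == 'nova-compute'):
    #   table.add_rule('FORWARD', '-j $sg-fallback')
    # is rendered by IptablesRule.__str__ as
    #   -A nova-compute-FORWARD -j nova-compute-sg-fallback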
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from nova-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, execute=None):
if not execute:
self.execute = _execute
else:
self.execute = execute
self.ipv4 = {'filter': IptablesTable(),
'nat': IptablesTable()}
self.ipv6 = {'filter': IptablesTable()}
# Add a nova-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('nova-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('nova-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' % (chain,),
wrap=False)
# Add a nova-postrouting-bottom chain. It's intended to be shared
# among the various nova components. We set it as the last chain
# of POSTROUTING chain.
self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared nova-postrouting-bottom chain
# so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
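        # The resulting chain topology, sketched under the assumption that
        # binary_name == 'nova-compute':
        #   filter: FORWARD/OUTPUT -> nova-filter-top -> nova-compute-local
        #           INPUT/OUTPUT/FORWARD -> nova-compute-<chain>
        #   nat:    POSTROUTING -> nova-postrouting-bottom
        #               -> nova-compute-snat -> nova-compute-float-snat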
@utils.synchronized('iptables', external=True)
def apply(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if FLAGS.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
for table in tables:
current_table, _err = self.execute('%s-save' % (cmd,),
'-t', '%s' % (table,),
run_as_root=True,
attempts=5)
current_lines = current_table.split('\n')
new_filter = self._modify_rules(current_lines,
tables[table])
self.execute('%s-restore' % (cmd,), run_as_root=True,
process_input='\n'.join(new_filter),
attempts=5)
LOG.debug(_("IPTablesManager.apply completed with success"))
def _modify_rules(self, current_lines, table, binary=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
rules = table.rules
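        # NOTE: binary_name is defined near the top of this module (the
        # basename of the running Nova binary); wrapped chain names carry it
        # as a prefix, which is what makes the filtering below work.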
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(new_filter):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
our_rules = []
for rule in rules:
rule_str = str(rule)
if rule.top:
# rule.top == True means we want this rule to be at the top.
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
new_filter)
our_rules += [rule_str]
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = [':%s - [0:0]' % (name,)
for name in unwrapped_chains]
new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' %
(binary_name, name,)
for name in chains]
seen_lines = set()
def _weed_out_duplicates(line):
line = line.strip()
if line in seen_lines:
return False
else:
seen_lines.add(line)
return True
# We filter duplicates, letting the *last* occurrence take
# precedence.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
new_filter.reverse()
return new_filter
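# Rough sketch of what _modify_rules does (illustrative input; assumes the
# module-level binary_name is 'nova-network'):
#
#     in:  ['*filter', ':INPUT ACCEPT [0:0]',
#           '-A INPUT -j nova-network-INPUT', 'COMMIT']
#     out: lines mentioning 'nova-network' are dropped first, then our chain
#          declarations (':nova-network-INPUT - [0:0]', ...) and rules are
#          spliced back in right after the built-in ':' lines.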
# NOTE(jkoelker) This is just a nice little stub point since mocking
# builtins with mox is a nightmare
def write_to_file(file, data, mode='w'):
with open(file, mode) as f:
f.write(data)
def ensure_path(path):
if not os.path.exists(path):
os.makedirs(path)
def metadata_forward():
"""Create forwarding rule for metadata."""
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' %
(FLAGS.metadata_host,
FLAGS.metadata_port))
iptables_manager.apply()
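# Worked example (illustrative flag values): with metadata_host='10.0.0.1'
# and metadata_port=8775, the PREROUTING rule added above expands to:
#     -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80
#     -j DNAT --to-destination 10.0.0.1:8775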
def metadata_accept():
"""Create the filter accept rule for metadata."""
iptables_manager.ipv4['filter'].add_rule('INPUT',
'-s 0.0.0.0/0 -d %s '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' %
(FLAGS.metadata_host,
FLAGS.metadata_port))
iptables_manager.apply()
def add_snat_rule(ip_range):
iptables_manager.ipv4['nat'].add_rule('snat',
'-s %s -j SNAT --to-source %s' %
(ip_range,
FLAGS.routing_source_ip))
iptables_manager.apply()
def init_host(ip_range=None):
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
if not ip_range:
ip_range = FLAGS.fixed_range
add_snat_rule(ip_range)
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s/32 -j ACCEPT' %
(ip_range, FLAGS.metadata_host))
for dmz in FLAGS.dmz_cidr:
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s -j ACCEPT' %
(ip_range, dmz))
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %(range)s -d %(range)s '
'-m conntrack ! --ctstate DNAT '
'-j ACCEPT' %
{'range': ip_range})
iptables_manager.apply()
def bind_floating_ip(floating_ip, device):
"""Bind ip to public interface."""
_execute('ip', 'addr', 'add', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
if FLAGS.send_arp_for_ha:
_execute('arping', '-U', floating_ip,
'-A', '-I', device,
'-c', 1, run_as_root=True, check_exit_code=False)
def unbind_floating_ip(floating_ip, device):
"""Unbind a public ip from public interface."""
_execute('ip', 'addr', 'del', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_metadata_ip():
"""Sets up local metadata ip."""
_execute('ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo',
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
'--dport 1194 '
'-j ACCEPT' % private_ip)
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule('OUTPUT',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('float-snat',
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
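# Worked example (illustrative addresses):
# floating_forward_rules('10.1.2.3', '192.168.0.5') returns:
#     ('PREROUTING', '-d 10.1.2.3 -j DNAT --to 192.168.0.5')
#     ('OUTPUT', '-d 10.1.2.3 -j DNAT --to 192.168.0.5')
#     ('float-snat', '-s 192.168.0.5 -j SNAT --to 10.1.2.3')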
def initialize_gateway_device(dev, network_ref):
if not network_ref:
return
_execute('sysctl', '-w', 'net.ipv4.ip_forward=1', run_as_root=True)
# NOTE(vish): The ip for dnsmasq has to be the first address on the
    # bridge for it to respond to requests properly
full_ip = '%s/%s' % (network_ref['dhcp_server'],
network_ref['cidr'].rpartition('/')[2])
new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
old_ip_params = []
out, err = _execute('ip', 'addr', 'show', 'dev', dev,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
ip_params = fields[1:-1]
old_ip_params.append(ip_params)
if ip_params[0] != full_ip:
new_ip_params.append(ip_params)
if not old_ip_params or old_ip_params[0][0] != full_ip:
gateway = None
out, err = _execute('route', '-n', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == '0.0.0.0' and fields[-1] == dev:
gateway = fields[1]
_execute('route', 'del', 'default', 'gw', gateway,
'dev', dev, run_as_root=True,
check_exit_code=[0, 7])
for ip_params in old_ip_params:
_execute(*_ip_bridge_cmd('del', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for ip_params in new_ip_params:
_execute(*_ip_bridge_cmd('add', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
if gateway:
_execute('route', 'add', 'default', 'gw', gateway,
run_as_root=True, check_exit_code=[0, 7])
if FLAGS.send_arp_for_ha:
_execute('arping', '-U', network_ref['dhcp_server'],
'-A', '-I', dev,
'-c', 1, run_as_root=True, check_exit_code=False)
if(FLAGS.use_ipv6):
_execute('ip', '-f', 'inet6', 'addr',
'change', network_ref['cidr_v6'],
'dev', dev, run_as_root=True)
def get_dhcp_leases(context, network_ref):
"""Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
for data in db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host):
hosts.append(_host_lease(data))
return '\n'.join(hosts)
def get_dhcp_hosts(context, network_ref):
"""Get network's hosts config in dhcp-host format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
for data in db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host):
hosts.append(_host_dhcp(data))
return '\n'.join(hosts)
def _add_dnsmasq_accept_rules(dev):
"""Allow DHCP and DNS traffic through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.add_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
def get_dhcp_opts(context, network_ref):
"""Get network's hosts config in dhcp-opts format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
data = db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host)
if data:
        # set of instance ids
instance_set = set([datum['instance_id'] for datum in data])
default_gw_vif = {}
for instance_id in instance_set:
vifs = db.virtual_interface_get_by_instance(context, instance_id)
if vifs:
                # offer a default gateway to the first virtual interface
default_gw_vif[instance_id] = vifs[0]['id']
        for datum in data:
            instance_id = datum['instance_id']
            if instance_id in default_gw_vif:
                # we don't want a default gateway for this fixed ip
                if default_gw_vif[instance_id] != datum['vif_id']:
                    hosts.append(_host_dhcp_opts(datum))
return '\n'.join(hosts)
def release_dhcp(dev, address, mac_address):
utils.execute('dhcp_release', dev, address, mac_address, run_as_root=True)
def update_dhcp(context, dev, network_ref):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, get_dhcp_hosts(context, network_ref))
restart_dhcp(context, dev, network_ref)
def update_dhcp_hostfile_with_text(dev, hosts_text):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, hosts_text)
def kill_dhcp(dev):
pid = _dnsmasq_pid_for(dev)
if pid:
_execute('kill', '-9', pid, run_as_root=True)
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
# configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def restart_dhcp(context, dev, network_ref):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
conffile = _dhcp_file(dev, 'conf')
if FLAGS.use_single_default_gateway:
# NOTE(vish): this will have serious performance implications if we
# are not in multi_host mode.
optsfile = _dhcp_file(dev, 'opts')
write_to_file(optsfile, get_dhcp_opts(context, network_ref))
os.chmod(optsfile, 0644)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
# Using symlinks can cause problems here so just compare the name
# of the file itself
if conffile.split('/')[-1] in out:
try:
_execute('kill', '-HUP', pid, run_as_root=True)
return
except Exception as exc: # pylint: disable=W0703
LOG.error(_('Hupping dnsmasq threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile,
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
'--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=%s,static,%ss' % (network_ref['dhcp_start'],
FLAGS.dhcp_lease_time),
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
if FLAGS.dns_server:
cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
if FLAGS.use_single_default_gateway:
cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')]
_execute(*cmd, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
@utils.synchronized('radvd_start')
def update_ra(context, dev, network_ref):
conffile = _ra_file(dev, 'conf')
conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (dev, network_ref['cidr_v6'])
write_to_file(conffile, conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
_execute('kill', pid, run_as_root=True)
except Exception as exc: # pylint: disable=W0703
LOG.error(_('killing radvd threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
cmd = ['radvd',
'-C', '%s' % _ra_file(dev, 'conf'),
'-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
def _host_lease(data):
"""Return a host string for an address in leasefile format."""
if data['instance_updated']:
timestamp = data['instance_updated']
else:
timestamp = data['instance_created']
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
data['vif_address'],
data['address'],
data['instance_hostname'] or '*')
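# Example lease line (illustrative data, with FLAGS.dhcp_lease_time=120 and
# an instance updated at epoch second 1000000000):
#     1000000120 02:16:3e:00:00:01 10.0.0.2 instance-1 *
# i.e. expiry timestamp, MAC, address, hostname, client-id placeholder.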
def _host_dhcp_network(data):
return 'NW-%s' % data['vif_id']
def _host_dhcp(data):
"""Return a host string for an address in dhcp-host format."""
if FLAGS.use_single_default_gateway:
return '%s,%s.%s,%s,%s' % (data['vif_address'],
data['instance_hostname'],
FLAGS.dhcp_domain,
data['address'],
'net:' + _host_dhcp_network(data))
else:
return '%s,%s.%s,%s' % (data['vif_address'],
data['instance_hostname'],
FLAGS.dhcp_domain,
data['address'])
def _host_dhcp_opts(data):
"""Return an empty gateway option."""
return '%s,%s' % (_host_dhcp_network(data), 3)
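# Example (illustrative vif id 42): _host_dhcp_opts returns 'NW-42,3', the
# dnsmasq tag for the vif plus DHCP option 3 (router) with no value, which
# withholds the default gateway from that host.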
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network."""
if FLAGS.fake_network:
LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def _device_exists(device):
"""Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False, run_as_root=True)
return not err
def _dhcp_file(dev, kind):
"""Return path to a pid, leases or conf file for a bridge/device."""
ensure_path(FLAGS.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _ra_file(dev, kind):
"""Return path to a pid or conf file for a bridge/device."""
ensure_path(FLAGS.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _dnsmasq_pid_for(dev):
"""Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
try:
with open(pid_file, 'r') as f:
return int(f.read())
except (ValueError, IOError):
return None
def _ra_pid_for(dev):
"""Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
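# Example (illustrative arguments):
#     _ip_bridge_cmd('add', ['10.0.0.1/24', 'brd', '10.0.0.255'], 'br100')
# returns:
#     ['ip', 'addr', 'add', '10.0.0.1/24', 'brd', '10.0.0.255', 'dev', 'br100']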
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
interface_driver = None
def _get_interface_driver():
global interface_driver
if not interface_driver:
interface_driver = importutils.import_object(
FLAGS.linuxnet_interface_driver)
return interface_driver
def plug(network, mac_address, gateway=True):
return _get_interface_driver().plug(network, mac_address, gateway)
def unplug(network):
return _get_interface_driver().unplug(network)
def get_dev(network):
return _get_interface_driver().get_dev(network)
class LinuxNetInterfaceDriver(object):
"""Abstract class that defines generic network host API"""
""" for for all Linux interface drivers."""
def plug(self, network, mac_address):
"""Create Linux device, return device name"""
raise NotImplementedError()
def unplug(self, network):
"""Destory Linux device, return device name"""
raise NotImplementedError()
def get_dev(self, network):
"""Get device name"""
raise NotImplementedError()
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
if network.get('vlan', None) is not None:
iface = FLAGS.vlan_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
network['bridge'],
iface,
network,
mac_address)
else:
iface = FLAGS.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
iface,
network, gateway)
# NOTE(vish): applying here so we don't get a lock conflict
iptables_manager.apply()
return network['bridge']
def unplug(self, network):
return self.get_dev(network)
def get_dev(self, network):
return network['bridge']
@classmethod
def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
net_attrs=None, mac_address=None):
"""Create a vlan and bridge unless they already exist."""
interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
bridge_interface, mac_address)
LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
return interface
@classmethod
@utils.synchronized('ensure_vlan', external=True)
def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
            LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
'id', vlan_num, run_as_root=True)
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
_execute('ip', 'link', 'set', interface, 'address',
mac_address, run_as_root=True)
_execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
if FLAGS.network_device_mtu:
_execute('ip', 'link', 'set', interface, 'mtu',
FLAGS.network_device_mtu, run_as_root=True)
return interface
@classmethod
@utils.synchronized('ensure_bridge', external=True)
def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
The code will attempt to move any ips that already exist on the
interface onto the bridge and reset the default gateway if necessary.
"""
if not _device_exists(bridge):
LOG.debug(_('Starting Bridge interface for %s'), interface)
_execute('brctl', 'addbr', bridge, run_as_root=True)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
_execute('brctl', 'stp', bridge, 'off', run_as_root=True)
# (danwent) bridge device MAC address can't be set directly.
# instead it inherits the MAC address of the first device on the
# bridge, which will either be the vlan interface, or a
# physical NIC.
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
old_gateway = None
out, err = _execute('route', '-n', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if (fields and fields[0] == '0.0.0.0' and
fields[-1] == interface):
old_gateway = fields[1]
_execute('route', 'del', 'default', 'gw', old_gateway,
'dev', interface, run_as_root=True,
check_exit_code=[0, 7])
out, err = _execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]),
run_as_root=True, check_exit_code=[0, 2, 254])
_execute(*_ip_bridge_cmd('add', params, bridge),
run_as_root=True, check_exit_code=[0, 2, 254])
if old_gateway:
_execute('route', 'add', 'default', 'gw', old_gateway,
run_as_root=True, check_exit_code=[0, 7])
if (err and err != "device %s is already a member of a bridge;"
"can't enslave it to bridge %s.\n" % (interface, bridge)):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
# Don't forward traffic unless we were told to be a gateway
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
ipv4_filter.add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
ipv4_filter.add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
else:
ipv4_filter.add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
ipv4_filter.add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
if not _device_exists(dev):
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl',
'--', '--may-exist', 'add-port', bridge, dev,
'--', 'set', 'Interface', dev, 'type=internal',
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-status=active',
'--', 'set', 'Interface', dev,
'external-ids:attached-mac=%s' % mac_address,
run_as_root=True)
_execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True)
if FLAGS.network_device_mtu:
_execute('ip', 'link', 'set', dev, 'mtu',
FLAGS.network_device_mtu, run_as_root=True)
_execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
_execute('ovs-ofctl',
'add-flow', bridge, 'priority=1,actions=drop',
run_as_root=True)
_execute('ovs-ofctl', 'add-flow', bridge,
'udp,tp_dst=67,dl_dst=%s,priority=2,actions=normal' %
mac_address, run_as_root=True)
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
else:
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
return dev
def unplug(self, network):
dev = self.get_dev(network)
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl', '--', '--if-exists', 'del-port',
bridge, dev, run_as_root=True)
return dev
def get_dev(self, network):
dev = 'gw-' + str(network['uuid'][0:11])
return dev
# plugs interfaces using Linux Bridge when using QuantumManager
class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
BRIDGE_NAME_PREFIX = 'brq'
GATEWAY_INTERFACE_PREFIX = 'gw-'
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
bridge = self.get_bridge(network)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
return bridge
else:
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev, mac_address)
if not _device_exists(bridge):
LOG.debug(_("Starting bridge %s "), bridge)
utils.execute('brctl', 'addbr', bridge, run_as_root=True)
utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
utils.execute('brctl', 'stp', bridge, 'off', run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'address', mac_address,
run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
LOG.debug(_("Done starting bridge %s"), bridge)
full_ip = '%s/%s' % (network['dhcp_server'],
network['cidr'].rpartition('/')[2])
utils.execute('ip', 'address', 'add', full_ip, 'dev', bridge,
run_as_root=True)
return dev
def unplug(self, network):
dev = self.get_dev(network)
if not _device_exists(dev):
return None
else:
try:
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
except exception.ProcessExecutionError:
LOG.error(_("Failed unplugging gateway interface '%s'"), dev)
raise
LOG.debug(_("Unplugged gateway interface '%s'"), dev)
return dev
@classmethod
def create_tap_dev(_self, dev, mac_address=None):
if not _device_exists(dev):
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True)
except exception.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
if mac_address:
utils.execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
def get_dev(self, network):
dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11])
return dev
def get_bridge(self, network):
bridge = self.BRIDGE_NAME_PREFIX + str(network['uuid'][0:11])
return bridge
iptables_manager = IptablesManager()
|
|
"""
aggregation.py contains utility functions to handle multiple named and lambda
kwarg aggregations in groupby and DataFrame/Series aggregation
"""
from collections import defaultdict
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
AggFuncTypeDict,
AggObjType,
Axis,
FrameOrSeries,
FrameOrSeriesUnion,
Label,
)
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCNDFrame, ABCSeries
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.indexes.api import Index
if TYPE_CHECKING:
from pandas.core.series import Series
def reconstruct_func(
func: Optional[AggFuncType], **kwargs
) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]:
"""
    This is the internal function to reconstruct func given whether relabeling
    is applied, and to normalize the keywords to get the new order of columns.
If named aggregation is applied, `func` will be None, and kwargs contains the
column and aggregation function information to be parsed;
If named aggregation is not applied, `func` is either string (e.g. 'min') or
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
If relabeling is True, will return relabeling, reconstructed func, column
names, and the reconstructed order of columns.
If relabeling is False, the columns and order will be None.
Parameters
----------
func: agg function (e.g. 'min' or Callable) or list of agg functions
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
normalize_keyword_aggregation function for relabelling
Returns
-------
    relabelling: bool, whether relabelling is applied
func: normalized and mangled func
columns: list of column names
order: list of columns indices
Examples
--------
>>> reconstruct_func(None, **{"foo": ("col", "min")})
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
>>> reconstruct_func("min")
(False, 'min', None, None)
"""
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
columns: Optional[List[str]] = None
order: Optional[List[int]] = None
if not relabeling:
if isinstance(func, list) and len(func) > len(set(func)):
# GH 28426 will raise error if duplicated function names are used and
# there is no reassigned name
raise SpecificationError(
"Function names must be unique if there is no new column names "
"assigned"
)
elif func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
if relabeling:
func, columns, order = normalize_keyword_aggregation(kwargs)
return relabeling, func, columns, order
def is_multi_agg_with_relabel(**kwargs) -> bool:
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
len(kwargs) > 0
)
def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
to the old Dict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
col_idx_order : List[int]
List of columns indices.
Examples
--------
>>> normalize_keyword_aggregation({"output": ("input", "sum")})
(defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
"""
# Normalize the aggregation functions as Mapping[column, List[func]],
# process normally, then fixup the names.
# TODO: aggspec type: typing.Dict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec: DefaultDict = defaultdict(list)
order = []
columns, pairs = list(zip(*kwargs.items()))
for name, (column, aggfunc) in zip(columns, pairs):
aggspec[column].append(aggfunc)
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
# uniquify aggfunc name if duplicated in order list
uniquified_order = _make_unique_kwarg_list(order)
    # GH 25719: because aggspec changes the order of assigned columns in
    # aggregation, uniquified_aggspec stores the uniquified order list and is
    # compared with order based on index
aggspec_order = [
(column, com.get_callable_name(aggfunc) or aggfunc)
for column, aggfuncs in aggspec.items()
for aggfunc in aggfuncs
]
uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
# get the new index of columns by comparison
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
return aggspec, columns, col_idx_order
def _make_unique_kwarg_list(
seq: Sequence[Tuple[Any, Any]]
) -> Sequence[Tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
Examples:
--------
>>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
return [
(pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
if seq.count(pair) > 1
else pair
for i, pair in enumerate(seq)
]
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = partial(aggfunc)
aggfunc.__name__ = f"<lambda_{i}>"
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
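# Example (illustrative): lambdas come back wrapped and renamed so their
# aggregation results can be told apart:
#     funcs = _managle_lambda_list([lambda x: x.min(), lambda x: x.max()])
#     [f.__name__ for f in funcs]  # ['<lambda_0>', '<lambda_1>']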
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
Non-dict-like `agg_spec` are pass through as is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas('sum')
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _managle_lambda_list(agg_spec)
return mangled_aggspec
def relabel_result(
result: FrameOrSeries,
func: Dict[str, List[Union[Callable, str]]],
columns: Iterable[Label],
order: Iterable[int],
) -> Dict[Label, "Series"]:
"""
Internal function to reorder result if relabelling is True for
dataframe.agg, and return the reordered result in dict.
Parameters:
----------
result: Result from aggregation
func: Dict of (column name, funcs)
columns: New columns name for relabelling
order: New order for relabelling
Examples:
---------
>>> result = DataFrame({"A": [np.nan, 2, np.nan],
... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP
>>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
>>> columns = ("foo", "aab", "bar", "dat")
>>> order = [0, 1, 2, 3]
>>> _relabel_result(result, func, columns, order) # doctest: +SKIP
dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"]))
"""
reordered_indexes = [
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
]
reordered_result_in_dict: Dict[Label, "Series"] = {}
idx = 0
reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
for col, fun in func.items():
s = result[col].dropna()
# In the `_aggregate`, the callable names are obtained and used in `result`, and
# these names are ordered alphabetically. e.g.
# C2 C1
# <lambda> 1 NaN
# amax NaN 4.0
# max NaN 4.0
# sum 18.0 6.0
        # Therefore, the order of functions for each column could be shuffled
        # accordingly, so we need to get the callable name if it is not already
        # a parsed name, and reorder the aggregated result for each column.
# e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
# [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
# reorder so that aggregated values map to their functions regarding the order.
        # However, when only one column is being used for aggregation, there is
        # no need to reorder, since the index is not sorted; keep as is in
        # `funcs`, e.g.
# A
# min 1.0
# mean 1.5
# mean 1.5
if reorder_mask:
fun = [
com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
]
col_idx_order = Index(s.index).get_indexer(fun)
s = s[col_idx_order]
# assign the new user-provided "named aggregation" as index names, and reindex
# it based on the whole user-provided names.
s.index = reordered_indexes[idx : idx + len(fun)]
reordered_result_in_dict[col] = s.reindex(columns, copy=False)
idx = idx + len(fun)
return reordered_result_in_dict
def validate_func_kwargs(
kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
        List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
raise TypeError(no_arg_message)
return columns, func
def transform(
obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs
) -> FrameOrSeriesUnion:
"""
Transform a DataFrame or Series
Parameters
----------
obj : DataFrame or Series
Object to compute the transform on.
func : string, function, list, or dictionary
Function(s) to compute the transform with.
axis : {0 or 'index', 1 or 'columns'}
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
Returns
-------
DataFrame or Series
Result of applying ``func`` along the given axis of the
Series or DataFrame.
Raises
------
ValueError
If the transform function fails or does not transform.
"""
is_series = obj.ndim == 1
if obj._get_axis_number(axis) == 1:
assert not is_series
return transform(obj.T, func, 0, *args, **kwargs).T
if is_list_like(func) and not is_dict_like(func):
func = cast(List[AggFuncTypeBase], func)
        # Convert func to an equivalent dict
if is_series:
func = {com.get_callable_name(v) or v: v for v in func}
else:
func = {col: func for col in obj}
if is_dict_like(func):
func = cast(AggFuncTypeDict, func)
return transform_dict_like(obj, func, *args, **kwargs)
# func is either str or callable
func = cast(AggFuncTypeBase, func)
try:
result = transform_str_or_callable(obj, func, *args, **kwargs)
except Exception:
raise ValueError("Transform function failed")
# Functions that transform may return empty Series/DataFrame
# when the dtype is not appropriate
if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty:
raise ValueError("Transform function failed")
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
obj.index
):
raise ValueError("Function did not transform")
return result
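# Minimal usage sketch (assumed frame; mirrors the dispatch above):
#     df = pd.DataFrame({"A": [1.0, 4.0, 9.0]})
#     transform(df, "sqrt", 0)           # str func
#     transform(df, ["sqrt"], 0)         # list-like, converted to a dict
#     transform(df, {"A": "sqrt"}, 0)    # dict-like -> transform_dict_like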
def transform_dict_like(
obj: FrameOrSeries,
func: AggFuncTypeDict,
*args,
**kwargs,
):
"""
Compute transform in the case of a dict-like func
"""
from pandas.core.reshape.concat import concat
if len(func) == 0:
raise ValueError("No transform functions were provided")
if obj.ndim != 1:
# Check for missing columns on a frame
cols = sorted(set(func.keys()) - set(obj.columns))
if len(cols) > 0:
raise SpecificationError(f"Column(s) {cols} do not exist")
# Can't use func.values(); wouldn't work for a Series
if any(is_dict_like(v) for _, v in func.items()):
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")
results: Dict[Label, FrameOrSeriesUnion] = {}
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
results[name] = transform(colg, how, 0, *args, **kwargs)
except Exception as err:
if (
str(err) == "Function did not transform"
or str(err) == "No transform functions were provided"
):
raise err
# combine results
if len(results) == 0:
raise ValueError("Transform function failed")
return concat(results, axis=1)
def transform_str_or_callable(
obj: FrameOrSeries, func: AggFuncTypeBase, *args, **kwargs
) -> FrameOrSeriesUnion:
"""
Compute transform in the case of a string or callable func
"""
if isinstance(func, str):
return obj._try_aggregate_string_function(func, *args, **kwargs)
if not args and not kwargs:
f = obj._get_cython_func(func)
if f:
return getattr(obj, f)()
# Two possible ways to use a UDF - apply or call directly
try:
return obj.apply(func, args=args, **kwargs)
except Exception:
return func(obj, *args, **kwargs)
def aggregate(
obj: AggObjType,
arg: AggFuncType,
*args,
**kwargs,
):
"""
Provide an implementation for the aggregators.
Parameters
----------
obj : Pandas object to compute aggregation on.
arg : string, dict, function.
*args : args to pass on to the function.
**kwargs : kwargs to pass on to the function.
Returns
-------
tuple of result, how.
Notes
-----
    how can be a string describing the required post-processing, or
    None if not required.
"""
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(obj, "axis", 0)
if isinstance(arg, str):
return obj._try_aggregate_string_function(arg, *args, **kwargs), None
elif is_dict_like(arg):
arg = cast(AggFuncTypeDict, arg)
return agg_dict_like(obj, arg, _axis), True
elif is_list_like(arg):
        # we require a list, but not a 'str'
arg = cast(List[AggFuncTypeBase], arg)
return agg_list_like(obj, arg, _axis=_axis), None
else:
result = None
if callable(arg):
f = obj._get_cython_func(arg)
if f and not args and not kwargs:
return getattr(obj, f)(), None
# caller can react
return result, True
def agg_list_like(
obj: AggObjType,
arg: List[AggFuncTypeBase],
_axis: int,
) -> FrameOrSeriesUnion:
"""
Compute aggregation in the case of a list-like argument.
Parameters
----------
obj : Pandas object to compute aggregation on.
arg : list
Aggregations to compute.
_axis : int, 0 or 1
Axis to compute aggregation on.
Returns
-------
Result of aggregation.
"""
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if obj._selected_obj.ndim == 1:
selected_obj = obj._selected_obj
else:
selected_obj = obj._obj_with_exclusions
results = []
keys = []
# degenerate case
if selected_obj.ndim == 1:
for a in arg:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
try:
new_res = colg.aggregate(a)
except TypeError:
pass
else:
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
# multiples
else:
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
try:
new_res = colg.aggregate(arg)
except (TypeError, DataError):
pass
except ValueError as err:
# cannot aggregate
if "Must produce aggregated value" in str(err):
# raised directly in _aggregate_named
pass
elif "no results" in str(err):
# raised directly in _aggregate_multiple_funcs
pass
else:
raise
else:
results.append(new_res)
keys.append(col)
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError as err:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas import Series
result = Series(results, index=keys, name=obj.name)
if is_nested_object(result):
raise ValueError(
"cannot combine transform and aggregation operations"
) from err
return result
def agg_dict_like(
obj: AggObjType,
arg: AggFuncTypeDict,
_axis: int,
) -> FrameOrSeriesUnion:
"""
Compute aggregation in the case of a dict-like argument.
Parameters
----------
obj : Pandas object to compute aggregation on.
arg : dict
label-aggregation pairs to compute.
_axis : int, 0 or 1
Axis to compute aggregation on.
Returns
-------
Result of aggregation.
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
if _axis != 0: # pragma: no cover
raise ValueError("Can only pass dict with axis=0")
selected_obj = obj._selected_obj
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in arg.values()):
new_arg: AggFuncTypeDict = {}
for k, v in arg.items():
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
raise SpecificationError("nested renamer is not supported")
elif isinstance(selected_obj, ABCSeries):
raise SpecificationError("nested renamer is not supported")
elif (
isinstance(selected_obj, ABCDataFrame) and k not in selected_obj.columns
):
raise KeyError(f"Column '{k}' does not exist!")
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(arg.keys())
if isinstance(selected_obj, ABCDataFrame) and len(
selected_obj.columns.intersection(keys)
) != len(keys):
cols = sorted(set(keys) - set(selected_obj.columns.intersection(keys)))
raise SpecificationError(f"Column(s) {cols} do not exist")
from pandas.core.reshape.concat import concat
if selected_obj.ndim == 1:
# key only used for output
colg = obj._gotitem(obj._selection, ndim=1)
results = {key: colg.agg(how) for key, how in arg.items()}
else:
# key used for column selection and output
results = {key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()}
# set the final keys
keys = list(arg.keys())
# Avoid making two isinstance calls in all and any below
is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
# combine results
if all(is_ndframe):
keys_to_use = [k for k in keys if not results[k].empty]
        # Have to check if at least one DataFrame is not empty.
keys_to_use = keys_to_use if keys_to_use != [] else keys
axis = 0 if isinstance(obj, ABCSeries) else 1
result = concat({k: results[k] for k in keys_to_use}, axis=axis)
elif any(is_ndframe):
# There is a mix of NDFrames and scalars
raise ValueError(
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
else:
from pandas import Series
# we have a dict of scalars
# GH 36212 use name only if obj is a series
if obj.ndim == 1:
obj = cast("Series", obj)
name = obj.name
else:
name = None
result = Series(results, name=name)
return result
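# Usage sketch (assumed frame): DataFrame.agg with a dict routes here, e.g.
#     df.agg({'A': ['min', 'max']})
# scalar values are normalized to lists, each column is aggregated via
# obj._gotitem(key, ndim=1).agg(how), and the results are concatenated.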
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.api_core import operations_v1
from google.cloud.vision_v1p3beta1.proto import product_search_service_pb2_grpc
class ProductSearchGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.vision.v1p3beta1 ProductSearch API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-vision',
)
def __init__(self,
channel=None,
credentials=None,
address='vision.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
            raise ValueError(
                'The `channel` and `credentials` arguments are mutually '
                'exclusive.')
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'product_search_stub':
product_search_service_pb2_grpc.ProductSearchStub(channel),
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
        self._operations_client = operations_v1.OperationsClient(channel)
@classmethod
def create_channel(cls,
address='vision.googleapis.com:443',
credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
)
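    # Illustrative usage sketch (request construction omitted; the request
    # message name is an assumption from the v1p3beta1 protos):
    #
    #     transport = ProductSearchGrpcTransport()
    #     list_stub = transport.list_product_sets
    #     # response = list_stub(request)  # request: ListProductSetsRequest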
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def create_product_set(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates and returns a new ProductSet resource.
Possible errors:
- Returns INVALID\_ARGUMENT if display\_name is missing, or is longer
than 4096 characters.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].CreateProductSet
@property
def list_product_sets(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists ProductSets in an unspecified order.
Possible errors:
- Returns INVALID\_ARGUMENT if page\_size is greater than 100, or less
than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].ListProductSets
@property
def get_product_set(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information associated with a ProductSet.
Possible errors:
- Returns NOT\_FOUND if the ProductSet does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].GetProductSet
@property
def update_product_set(self):
"""Return the gRPC stub for {$apiMethod.name}.
Makes changes to a ProductSet resource. Only display\_name can be
updated currently.
Possible errors:
- Returns NOT\_FOUND if the ProductSet does not exist.
- Returns INVALID\_ARGUMENT if display\_name is present in update\_mask
but missing from the request or longer than 4096 characters.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].UpdateProductSet
@property
def delete_product_set(self):
"""Return the gRPC stub for {$apiMethod.name}.
Permanently deletes a ProductSet. All Products and ReferenceImages in
the ProductSet will be deleted.
The actual image files are not deleted from Google Cloud Storage.
Possible errors:
- Returns NOT\_FOUND if the ProductSet does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].DeleteProductSet
@property
def create_product(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates and returns a new product resource.
Possible errors:
- Returns INVALID\_ARGUMENT if display\_name is missing or longer than
4096 characters.
- Returns INVALID\_ARGUMENT if description is longer than 4096
characters.
- Returns INVALID\_ARGUMENT if product\_category is missing or invalid.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].CreateProduct
@property
def list_products(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists products in an unspecified order.
Possible errors:
- Returns INVALID\_ARGUMENT if page\_size is greater than 100 or less
than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].ListProducts
@property
def get_product(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information associated with a Product.
Possible errors:
- Returns NOT\_FOUND if the Product does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].GetProduct
@property
def update_product(self):
"""Return the gRPC stub for {$apiMethod.name}.
Makes changes to a Product resource. Only display\_name, description and
labels can be updated right now.
If labels are updated, the change will not be reflected in queries until
the next index time.
Possible errors:
- Returns NOT\_FOUND if the Product does not exist.
- Returns INVALID\_ARGUMENT if display\_name is present in update\_mask
but is missing from the request or longer than 4096 characters.
- Returns INVALID\_ARGUMENT if description is present in update\_mask
but is longer than 4096 characters.
- Returns INVALID\_ARGUMENT if product\_category is present in
update\_mask.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].UpdateProduct
@property
def delete_product(self):
"""Return the gRPC stub for {$apiMethod.name}.
Permanently deletes a product and its reference images.
Metadata of the product and all its images will be deleted right away,
but search queries against ProductSets containing the product may still
work until all related caches are refreshed.
Possible errors:
- Returns NOT\_FOUND if the product does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].DeleteProduct
@property
def create_reference_image(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates and returns a new ReferenceImage resource.
The ``bounding_poly`` field is optional. If ``bounding_poly`` is not
specified, the system will try to detect regions of interest in the
image that are compatible with the product\_category on the parent
product. If it is specified, detection is ALWAYS skipped. The system
converts polygons into non-rotated rectangles.
Note that the pipeline will resize the image if the image resolution is
too large to process (above 50MP).
Possible errors:
- Returns INVALID\_ARGUMENT if the image\_uri is missing or longer than
4096 characters.
- Returns INVALID\_ARGUMENT if the product does not exist.
- Returns INVALID\_ARGUMENT if bounding\_poly is not provided, and
nothing compatible with the parent product's product\_category is
detected.
- Returns INVALID\_ARGUMENT if bounding\_poly contains more than 10
polygons.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].CreateReferenceImage
@property
def delete_reference_image(self):
"""Return the gRPC stub for {$apiMethod.name}.
Permanently deletes a reference image.
The image metadata will be deleted right away, but search queries
against ProductSets containing the image may still work until all
related caches are refreshed.
The actual image files are not deleted from Google Cloud Storage.
Possible errors:
- Returns NOT\_FOUND if the reference image does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].DeleteReferenceImage
@property
def list_reference_images(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists reference images.
Possible errors:
- Returns NOT\_FOUND if the parent product does not exist.
- Returns INVALID\_ARGUMENT if the page\_size is greater than 100, or
less than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].ListReferenceImages
@property
def get_reference_image(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information associated with a ReferenceImage.
Possible errors:
- Returns NOT\_FOUND if the specified image does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].GetReferenceImage
@property
def add_product_to_product_set(self):
"""Return the gRPC stub for {$apiMethod.name}.
Adds a Product to the specified ProductSet. If the Product is already
present, no change is made.
One Product can be added to at most 100 ProductSets.
Possible errors:
        - Returns NOT_FOUND if the Product or the ProductSet doesn't exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].AddProductToProductSet
@property
def remove_product_from_product_set(self):
"""Return the gRPC stub for {$apiMethod.name}.
Removes a Product from the specified ProductSet.
Possible errors:
        - Returns NOT_FOUND if the Product is not found under the ProductSet.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].RemoveProductFromProductSet
@property
def list_products_in_product_set(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists the Products in a ProductSet, in an unspecified order. If the
ProductSet does not exist, the products field of the response will be
empty.
Possible errors:
        - Returns INVALID_ARGUMENT if page_size is greater than 100 or less
than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].ListProductsInProductSet
@property
def import_product_sets(self):
"""Return the gRPC stub for {$apiMethod.name}.
Asynchronous API that imports a list of reference images to specified
product sets based on a list of image information.
The ``google.longrunning.Operation`` API can be used to keep track of
        the progress and results of the request. ``Operation.metadata`` contains
        ``BatchOperationMetadata`` (progress). ``Operation.response`` contains
        ``ImportProductSetsResponse`` (results).
        The input source of this method is a CSV file on Google Cloud Storage.
        For the format of the CSV file, please see
        ``ImportProductSetsGcsSource.csv_file_uri``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['product_search_stub'].ImportProductSets
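    # Hedged sketch of driving the ImportProductSets long-running operation
    # through the generated client (names are illustrative placeholders):
    #
    #     operation = client.import_product_sets(parent, input_config)
    #     response = operation.result()  # ImportProductSetsResponse, per the
    #                                    # Operation.response note above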
|
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import warnings
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.six.moves import configparser
from pip._vendor.six.moves.urllib import parse as urllib_parse
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path, make_path_relative,
call_subprocess, read_text_file, FakeFile, _make_build_dir,
)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel, wheel_ext
from pip._vendor.packaging.version import Version
logger = logging.getLogger(__name__)
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
url=None, as_egg=False, update=True, editable_options=None,
pycompile=True, markers=None, isolated=False):
self.extras = ()
if isinstance(req, six.string_types):
req = pkg_resources.Requirement.parse(req)
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.source_dir = source_dir
self.editable = editable
if editable_options is None:
editable_options = {}
self.editable_options = editable_options
self.url = url
self.as_egg = as_egg
self.markers = markers
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This hold the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
self._temp_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
self.use_user_site = False
self.target_dir = None
self.pycompile = pycompile
self.isolated = isolated
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False):
name, url, extras_override, editable_options = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
url=url,
editable_options=editable_options,
isolated=isolated)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(cls, name, comes_from=None, isolated=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
url = None
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
elif (os.path.isdir(path)
and (os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(path):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not "
"found." % name
)
link = Link(path_to_url(name))
elif is_archive_file(path):
if not os.path.isfile(path):
logger.warning(
'Requirement %r looks like a filename, but the file does '
'not exist',
name
)
link = Link(path_to_url(name))
# it's a local file, dir, or url
if link:
url = link.url
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', url):
url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
# wheel file
if link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
return cls(req, comes_from, url=url, markers=markers,
isolated=isolated)
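    # A few hedged examples of inputs accepted by from_line() above
    # (illustrative paths/URLs, not real packages):
    #
    #     InstallRequirement.from_line('requests>=2.0')           # specifier
    #     InstallRequirement.from_line('./downloads/pkg.whl')     # wheel path
    #     InstallRequirement.from_line(
    #         'https://example.com/pkg-1.0.tar.gz#egg=pkg')       # archive URL
    #     InstallRequirement.from_line('pkg; python_version<"3"')  # w/ marker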
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
@property
def specifier(self):
return self.req.specifier
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def correct_build_location(self):
"""If the build location was a temporary directory, this will move it
to a new more permanent location"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
old_location = self._temp_build_dir
new_build_dir = self._ideal_build_dir
del self._ideal_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
new_location = os.path.join(new_build_dir, name)
if not os.path.exists(new_build_dir):
logger.debug('Creating directory %s', new_build_dir)
_make_build_dir(new_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def url_name(self):
if self.req is None:
return None
return urllib_parse.quote(self.req.project_name.lower())
@property
def setup_py(self):
try:
import setuptools # noqa
except ImportError:
# Setuptools is not available
raise InstallationError(
"setuptools must be installed to install from a source "
"distribution"
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.url,
)
with indent_log():
# if it's distribute>=0.7, it won't contain an importable
# setuptools, and having an egg-info dir blocks the ability of
# setup.py to find setuptools plugins, so delete the egg-info dir
# if no setuptools. it will get recreated by the run of egg_info
# NOTE: this self.name check only works when installing from a
# specifier (not archive path/urls)
# TODO: take this out later
if (self.name == 'distribute'
and not os.path.isdir(
os.path.join(self.source_dir, 'setuptools'))):
rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
if not os.path.exists(egg_info_dir):
os.makedirs(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=cwd,
filter_stdout=self._filter_install,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(
pkg_resources.parse_version(self.pkg_info()["Version"]),
Version):
op = "=="
else:
op = "==="
self.req = pkg_resources.Requirement.parse(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
]))
self.correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
)
or os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep)
+ (os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(self.name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
        # Check to see if we got an installed distribution or not; if we did,
        # we want to return its version.
if dist:
return dist.version
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.url:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.url.startswith('file:'):
# Static paths don't get updated
return
assert '+' in self.url, "bad url: %r" % self.url
if not self.update:
return
vc_type, url = self.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.url, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
if develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif egg_info_exists and dist.egg_info.endswith('.egg-info'):
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(self.name),
RemovedInPip8Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = configparser.SafeConfigParser()
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
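    # Hedged sketch of the uninstall flow this class supports (the package
    # name is illustrative; this mirrors how pip drives these methods):
    #
    #     req = InstallRequirement.from_line('example-package')
    #     if req.check_if_exists():
    #         req.uninstall(auto_confirm=True)
    #         req.commit_uninstall()  # or req.rollback_uninstall() on failure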
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.project_name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error(
"Can't commit %s, nothing uninstalled.", self.project_name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self):
if self.markers is not None:
return markers_interpret(self.markers)
else:
return True
def install(self, install_options, global_options=(), root=None):
if self.editable:
self.install_editable(install_options, global_options)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root)
self.install_succeeded = True
return
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable]
install_args.append('-c')
install_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
# FIXME: I'm not sure if this is a reasonable location;
# probably not but we can't put it in the default location, as
# that is a virtualenv symlink that isn't writable
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str)]
logger.info('Running setup.py install for %s', self.name)
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.source_dir,
filter_stdout=self._filter_install,
show_stdout=False,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install
                # command, so we are unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
make_path_relative(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
os.rmdir(temp_location)
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
with indent_log():
# FIXME: should we do --install-headers here too?
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
[
sys.executable,
'-c',
"import setuptools, tokenize; __file__=%r; exec(compile("
"getattr(tokenize, 'open', open)(__file__).read().replace"
"('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
]
+ list(global_options)
+ ['develop', '--no-deps']
+ list(install_options),
cwd=cwd, filter_stdout=self._filter_install,
show_stdout=False)
self.install_succeeded = True
def _filter_install(self, line):
level = logging.INFO
for regex in [
r'^running .*',
r'^writing .*',
'^creating .*',
'^[Cc]opying .*',
r'^reading .*',
r"^removing .*\.egg-info' \(and everything under it\)$",
r'^byte-compiling ',
r'^SyntaxError:',
r'^SyntaxWarning:',
r'^\s*Skipping implicit fixer: ',
r'^\s*(warning: )?no previously-included (files|directories) ',
r'^\s*warning: no files found matching \'.*\'',
# Not sure what this warning is, but it seems harmless:
r"^warning: manifest_maker: standard file '-c' not found$"]:
if not line or re.search(regex, line.strip()):
level = logging.DEBUG
break
return (level, line)
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately."""
if self.req is None:
return False
try:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# if we've already set distribute as a conflict to setuptools
# then this check has already run before. we don't want it to
# run again, and return False, since it would block the uninstall
# TODO: remove this later
if (self.req.project_name == 'setuptools'
and self.conflicts_with
and self.conflicts_with.project_name == 'distribute'):
return True
else:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv()
and dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.url and '.whl' in self.url
def move_wheel_files(self, wheeldir, root=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
def _strip_postfix(req):
"""
    Strip req postfix (-dev, -0.2, etc.)
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
    This function generates a dictionary of the query string
    parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
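# Hedged example of what _build_editable_options() extracts (the URL is
# illustrative):
#
#     _build_editable_options(
#         'git+https://example.com/repo.git#egg=pkg&subdirectory=src')
#     # -> {'egg': 'pkg', 'subdirectory': 'src'}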
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
    - a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
if extras:
return (
None,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
{},
)
else:
return None, url_no_extras, None, {}
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
try:
options = _build_editable_options(editable_req)
except Exception as exc:
raise InstallationError(
'--editable=%s error in editable options:%s' % (editable_req, exc)
)
if not options or 'egg' not in options:
req = _build_req_from_url(editable_req)
if not req:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
else:
req = options['egg']
package = _strip_postfix(req)
return package, url, None, options
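# Hedged examples of parse_editable() results (URLs are illustrative; the
# second example assumes the current directory contains a setup.py):
#
#     parse_editable('git+https://example.com/proj.git#egg=proj')
#     # -> ('proj', 'git+https://example.com/proj.git#egg=proj', None,
#     #     {'egg': 'proj'})
#     parse_editable('.[dev]')
#     # -> (None, file URL of the current directory, ('dev',), {})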
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~service_fabric_managed_clusters_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Gets the list of Service Fabric cluster resources created in the specified resource group.
Gets all Service Fabric cluster resources created or in the process of being created in the
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~service_fabric_managed_clusters_management_client.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedClusters'} # type: ignore
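    # Hedged usage sketch (illustrative): given a configured management
    # client `client`, the pager returned above is consumed with `async for`;
    # note the method itself is not awaited, only the iteration is:
    #
    #     async for cluster in client.managed_clusters.list_by_resource_group(
    #             'my-resource-group'):
    #         print(cluster.name)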
def list_by_subscription(
self,
**kwargs
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Gets the list of Service Fabric cluster resources created in the specified subscription.
Gets all Service Fabric cluster resources created or in the process of being created in the
subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~service_fabric_managed_clusters_management_client.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/managedClusters'} # type: ignore
async def get(
self,
resource_group_name: str,
cluster_name: str,
**kwargs
) -> "_models.ManagedCluster":
"""Gets a Service Fabric managed cluster resource.
Get a Service Fabric managed cluster resource created or in the process of being created in the
specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~service_fabric_managed_clusters_management_client.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedClusters/{clusterName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
cluster_name: str,
parameters: "_models.ManagedCluster",
**kwargs
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedCluster')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedClusters/{clusterName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
cluster_name: str,
parameters: "_models.ManagedCluster",
**kwargs
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Creates or updates a Service Fabric managed cluster resource.
Create or update a Service Fabric managed cluster resource with the specified name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:param parameters: The cluster resource.
:type parameters: ~service_fabric_managed_clusters_management_client.models.ManagedCluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~service_fabric_managed_clusters_management_client.models.ManagedCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedClusters/{clusterName}'} # type: ignore
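    # Hedged LRO usage sketch (names illustrative): awaiting
    # begin_create_or_update yields an AsyncLROPoller; awaiting its result()
    # returns the final ManagedCluster once polling completes:
    #
    #     poller = await client.managed_clusters.begin_create_or_update(
    #         'my-resource-group', 'my-cluster', parameters)
    #     cluster = await poller.result()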
async def update(
self,
resource_group_name: str,
cluster_name: str,
parameters: "_models.ManagedClusterUpdateParameters",
**kwargs
) -> "_models.ManagedCluster":
"""Updates the tags of of a Service Fabric managed cluster resource.
Update the tags of of a Service Fabric managed cluster resource with the specified name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:param parameters: The managed cluster resource updated tags.
:type parameters: ~service_fabric_managed_clusters_management_client.models.ManagedClusterUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~service_fabric_managed_clusters_management_client.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedClusterUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedClusters/{clusterName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cluster_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedClusters/{clusterName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cluster_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a Service Fabric managed cluster resource.
Delete a Service Fabric managed cluster resource with the specified name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster resource.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/managedClusters/{clusterName}'} # type: ignore
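    # Hedged deletion sketch (names illustrative), following the same LRO
    # pattern as begin_create_or_update above:
    #
    #     poller = await client.managed_clusters.begin_delete(
    #         'my-resource-group', 'my-cluster')
    #     await poller.result()  # completes when the cluster is deleted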
|
|
import atexit
import functools
import socket
import warnings
import weakref
import time
# So that 'setup.py doc' can import this module without Tornado or greenlet
requirements_satisfied = True
try:
from tornado import iostream, ioloop
except ImportError:
requirements_satisfied = False
warnings.warn("Tornado not installed", ImportWarning)
try:
import greenlet
except ImportError:
requirements_satisfied = False
warnings.warn("greenlet module not installed", ImportWarning)
import pymongo
import pymongo.common
import pymongo.errors
import pymongo.mongo_client
import pymongo.mongo_replica_set_client
import pymongo.pool
import pymongo.son_manipulator
import logging
class MongoIOStream(iostream.IOStream):
def can_read_sync(self, num_bytes):
return self._read_buffer_size >= num_bytes
def _check_deadline(cleanup_cb=None):
gr = greenlet.getcurrent()
if hasattr(gr, 'is_deadlined') and \
gr.is_deadlined():
if cleanup_cb:
cleanup_cb()
try:
gr.do_deadline()
except AttributeError:
            logging.exception(
                "Greenlet %s has 'is_deadlined' but not 'do_deadline'", gr)
def green_sock_method(method):
"""Wrap a GreenletSocket method to pause the current greenlet and arrange
for the greenlet to be resumed when non-blocking I/O has completed.
"""
@functools.wraps(method)
def _green_sock_method(self, *args, **kwargs):
self.child_gr = greenlet.getcurrent()
main = self.child_gr.parent
assert main, "Should be on child greenlet"
# Run on main greenlet
def closed(gr):
# The child greenlet might have died, e.g.:
# - An operation raised an error within PyMongo
# - PyMongo closed the MotorSocket in response
# - GreenletSocket.close() closed the IOStream
# - IOStream scheduled this closed() function on the loop
# - PyMongo operation completed (with or without error) and
# its greenlet terminated
# - IOLoop runs this function
if not gr.dead:
gr.throw(socket.error("Close called, killing mongo operation"))
# send the error to this greenlet if something goes wrong during the
# query
self.stream.set_close_callback(functools.partial(closed, self.child_gr))
try:
# Add timeout for closing non-blocking method call
if self.timeout and not self.timeout_handle:
self.timeout_handle = self.io_loop.add_timeout(
time.time() + self.timeout, self._switch_and_close)
# method is GreenletSocket.send(), recv(), etc. method() begins a
# non-blocking operation on an IOStream and arranges for
# callback() to be executed on the main greenlet once the
# operation has completed.
method(self, *args, **kwargs)
# Pause child greenlet until resumed by main greenlet, which
# will pass the result of the socket operation (data for recv,
# number of bytes written for sendall) to us.
socket_result = main.switch()
return socket_result
except socket.error:
raise
        except IOError as e:
# If IOStream raises generic IOError (e.g., if operation
# attempted on closed IOStream), then substitute socket.error,
# since socket.error is what PyMongo's built to handle. For
# example, PyMongo will catch socket.error, close the socket,
# and raise AutoReconnect.
raise socket.error(str(e))
finally:
# do this here in case main.switch throws
# Remove timeout handle if set, since we've completed call
if self.timeout_handle:
self.io_loop.remove_timeout(self.timeout_handle)
self.timeout_handle = None
# disable the callback to raise exception in this greenlet on socket
# close, since the greenlet won't be around to raise the exception
# in (and it'll be caught on the next query and raise an
# AutoReconnect, which gets handled properly)
self.stream.set_close_callback(None)
def cleanup_cb():
self.stream.close()
try:
self.pool_ref._socket_semaphore.release()
except weakref.ReferenceError:
# pool was gc'ed
pass
_check_deadline(cleanup_cb)
return _green_sock_method
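# A minimal, self-contained sketch (illustrative; nothing in this module calls
# it) of the pause/resume pattern green_sock_method relies on: the child
# greenlet yields to its parent mid-operation and is later resumed with the
# operation's result.
def _greenlet_switch_demo():
    def child_work():
        main = greenlet.getcurrent().parent
        # Pause here; the parent resumes us with the "I/O result".
        return main.switch('io-started')
    child = greenlet.greenlet(child_work)
    assert child.switch() == 'io-started'        # runs child up to main.switch()
    assert child.switch('io-done') == 'io-done'  # resumes child with the result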
class GreenletSocket(object):
"""Replace socket with a class that yields from the current greenlet, if
we're on a child greenlet, when making blocking calls, and uses Tornado
IOLoop to schedule child greenlet for resumption when I/O is ready.
We only implement those socket methods actually used by pymongo.
"""
def __init__(self, sock, io_loop, use_ssl=False, pool_ref=None):
self.use_ssl = use_ssl
self.io_loop = io_loop
self.timeout = None
        self.timeout_handle = None
        self.child_gr = None  # set by green_sock_method while an I/O call is pending
self.pool_ref = pool_ref
if self.use_ssl:
raise Exception("SSL isn't supported")
else:
self.stream = MongoIOStream(sock, io_loop=io_loop)
def setsockopt(self, *args, **kwargs):
self.stream.socket.setsockopt(*args, **kwargs)
def settimeout(self, timeout):
self.timeout = timeout
def _switch_and_close(self):
# called on timeout to switch back to child greenlet
self.close()
if self.child_gr is not None:
self.child_gr.throw(IOError("Socket timed out"))
@green_sock_method
def connect(self, pair):
# do the connect on the underlying socket asynchronously...
self.stream.connect(pair, greenlet.getcurrent().switch)
def sendall(self, data):
# do the send on the underlying socket synchronously...
try:
self.stream.write(data)
except IOError as e:
raise socket.error(str(e))
if self.stream.closed():
raise socket.error("connection closed")
def recv(self, num_bytes):
# if we have enough bytes in our local buffer, don't yield
if self.stream.can_read_sync(num_bytes):
return self.stream._consume(num_bytes)
# else yield while we wait on Mongo to send us more
else:
return self.recv_async(num_bytes)
@green_sock_method
def recv_async(self, num_bytes):
# do the recv on the underlying socket... come back to the current
# greenlet when it's done
return self.stream.read_bytes(num_bytes, greenlet.getcurrent().switch)
def close(self):
# since we're explicitly handling closing here, don't raise an exception
# via the callback
self.stream.set_close_callback(None)
sock = self.stream.socket
try:
try:
self.stream.close()
except KeyError:
# Tornado's _impl (epoll, kqueue, ...) has already removed this
# file descriptor from its dict.
pass
finally:
# Sometimes necessary to avoid ResourceWarnings in Python 3:
# specifically, if the fd is closed from the OS's view, then
# stream.close() throws an exception, but the socket still has an
# fd and so will print a ResourceWarning. In that case, calling
# sock.close() directly clears the fd and does not raise an error.
if sock:
sock.close()
def fileno(self):
return self.stream.socket.fileno()
class GreenletPool(pymongo.pool.Pool):
"""A simple connection pool of GreenletSockets.
"""
def __init__(self, *args, **kwargs):
io_loop = kwargs.pop('io_loop', None)
self.io_loop = io_loop if io_loop else ioloop.IOLoop.instance()
pymongo.pool.Pool.__init__(self, *args, **kwargs)
if self.max_size is not None and self.wait_queue_multiple:
raise ValueError("GreenletPool doesn't support wait_queue_multiple")
# HACK [adam Dec/6/14]: need to use our IOLoop/greenlet semaphore
# implementation, so override what Pool.__init__ sets
# self._socket_semaphore to here
self._socket_semaphore = GreenletBoundedSemaphore(self.max_size)
def create_connection(self):
"""Copy of BasePool.connect()
"""
assert greenlet.getcurrent().parent, "Should be on child greenlet"
host, port = self.pair
# Don't try IPv6 if we don't support it. Also skip it if host
# is 'localhost' (::1 is fine). Avoids slow connect issues
# like PYTHON-356.
family = socket.AF_INET
if socket.has_ipv6 and host != 'localhost':
family = socket.AF_UNSPEC
err = None
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, dummy, sa = res
green_sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
green_sock = GreenletSocket(
sock, self.io_loop, use_ssl=self.use_ssl,
pool_ref=weakref.proxy(self))
# GreenletSocket will pause the current greenlet and resume it
# when connection has completed
green_sock.settimeout(self.conn_timeout)
green_sock.connect(sa)
green_sock.settimeout(self.net_timeout)
return green_sock
            except socket.error as e:
err = e
if green_sock is not None:
green_sock.close()
if err is not None:
# pylint: disable=E0702
raise err
else:
# This likely means we tried to connect to an IPv6 only
            # host with an OS/kernel or Python interpreter that doesn't
# support IPv6.
raise socket.error('getaddrinfo failed')
class GreenletEvent(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self._flag = False
self._waiters = []
def is_set(self):
return self._flag
isSet = is_set
def set(self):
self._flag = True
waiters, self._waiters = self._waiters, []
# wake up all the greenlets that were waiting
for waiter in waiters:
self.io_loop.add_callback(waiter.switch)
def clear(self):
self._flag = False
def wait(self):
current = greenlet.getcurrent()
parent = current.parent
assert parent, "Must be called on child greenlet"
# yield back to the IOLoop if we have to wait
if not self._flag:
self._waiters.append(current)
try:
parent.switch()
finally:
# don't need callback because we haven't taken any resources
_check_deadline()
return self._flag
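# Hedged usage sketch of GreenletEvent (illustrative; nothing calls it):
# wait() must run on a child greenlet and yields to the IOLoop until set()
# schedules each waiter's resumption via add_callback.
def _greenlet_event_demo(io_loop):
    event = GreenletEvent(io_loop)
    results = []
    def waiter():
        event.wait()             # yields to the IOLoop until event.set()
        results.append('woken')
    io_loop.add_callback(greenlet.greenlet(waiter).switch)
    io_loop.add_callback(event.set)
    return results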
class GreenletSemaphore(object):
"""
Tornado IOLoop+Greenlet-based Semaphore class
"""
def __init__(self, value=1, io_loop=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._value = value
self._waiters = []
self._waiter_timeouts = {}
self._ioloop = io_loop if io_loop else ioloop.IOLoop.instance()
def _handle_timeout(self, timeout_gr):
if len(self._waiters) > 1000:
import os
logging.error('waiters size: %s on pid: %s', len(self._waiters),
os.getpid())
# should always be there, but add some safety just in case
if timeout_gr in self._waiters:
self._waiters.remove(timeout_gr)
if timeout_gr in self._waiter_timeouts:
self._waiter_timeouts.pop(timeout_gr)
timeout_gr.switch()
def acquire(self, blocking=True, timeout=None):
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
current = greenlet.getcurrent()
parent = current.parent
assert parent, "Must be called on child greenlet"
start_time = time.time()
        # if the semaphore has a positive value, subtract 1 and return True
if self._value > 0:
self._value -= 1
return True
elif not blocking:
# non-blocking mode, just return False
return False
# otherwise, we don't get the semaphore...
while True:
self._waiters.append(current)
if timeout:
callback = functools.partial(self._handle_timeout, current)
self._waiter_timeouts[current] = \
self._ioloop.add_timeout(time.time() + timeout,
callback)
# yield back to the parent, returning when someone releases the
# semaphore
#
# because of the async nature of the way we yield back, we're
# not guaranteed to actually *get* the semaphore after returning
# here (someone else could acquire() between the release() and
# this greenlet getting rescheduled). so we go back to the loop
# and try again.
#
# this design is not strictly fair and it's possible for
# greenlets to starve, but it strikes me as unlikely in
# practice.
try:
parent.switch()
finally:
# need to wake someone else up if we were the one
# given the semaphore
def _cleanup_cb():
if self._value > 0:
self._value -= 1
self.release()
_check_deadline(_cleanup_cb)
if self._value > 0:
self._value -= 1
if hasattr(current, '__mongoengine_comment__'):
current.add_mongo_start(
current.__mongoengine_comment__, time.time())
return True
# if we timed out, just return False instead of retrying
if timeout and (time.time() - start_time) >= timeout:
return False
__enter__ = acquire
def release(self):
current = greenlet.getcurrent()
if hasattr(current, '__mongoengine_comment__'):
is_scatter_gather = False
if hasattr(current, '__scatter_gather__'):
is_scatter_gather = current.__scatter_gather__
current.add_mongo_end(
current.__mongoengine_comment__, time.time(),
is_scatter_gather)
self._value += 1
if self._waiters:
waiting_gr = self._waiters.pop(0)
# remove the timeout
if waiting_gr in self._waiter_timeouts:
timeout = self._waiter_timeouts.pop(waiting_gr)
self._ioloop.remove_timeout(timeout)
# schedule the waiting greenlet to try to acquire
self._ioloop.add_callback(waiting_gr.switch)
def __exit__(self, t, v, tb):
self.release()
@property
def counter(self):
return self._value
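# Hedged sketch of GreenletSemaphore's fast path (illustrative; must itself
# run on a child greenlet, because acquire() asserts that): with a permit
# available, acquire() decrements the counter synchronously and never yields.
def _greenlet_semaphore_demo(io_loop):
    sem = GreenletSemaphore(value=1, io_loop=io_loop)
    assert sem.acquire(blocking=False)       # permit available: no yield
    assert not sem.acquire(blocking=False)   # no permit left
    sem.release()                            # returns the permit
    assert sem.counter == 1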
class GreenletBoundedSemaphore(GreenletSemaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1):
GreenletSemaphore.__init__(self, value)
self._initial_value = value
def release(self):
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
return GreenletSemaphore.release(self)
class GreenletPeriodicExecutor(object):
_executors = set()
def __init__(self, interval, dummy, target, io_loop):
# dummy is in the place of min_interval which has no semantic
# equivalent in this implementation
self._interval = interval
self._target = target
self._io_loop = io_loop
self._stopped = True
self._next_timeout = None
# make sure multiple calls to wake() only schedules once
self._scheduled = False
# i'm about 90% sure these three methods are pymongo's safeguard against
# forgetting to close these things themselves
@classmethod
def _register_executor(cls, executor):
ref = weakref.ref(executor, cls._on_executor_deleted)
cls._executors.add(ref)
@classmethod
def _on_executor_deleted(cls, ref):
cls._executors.remove(ref)
@classmethod
def _shutdown_executors(cls):
executors = list(cls._executors)
for ref in executors:
executor = ref()
if executor:
executor.close()
def open(self):
if self._stopped:
if not self._next_timeout and not self._scheduled:
self._io_loop.add_callback(self._execute)
self._scheduled = True
self._stopped = False
def wake(self):
if not self._stopped:
# schedule immediately
self._cancel_next_run()
if not self._scheduled:
self._io_loop.add_callback(self._execute)
self._scheduled = True
def close(self, dummy=None):
self._stopped = True
self._cancel_next_run()
def join(self, timeout=None):
pass
def _cancel_next_run(self):
if self._next_timeout:
self._io_loop.remove_timeout(self._next_timeout)
def _execute(self):
self._next_timeout = None
self._scheduled = False
# cover the case where close is called after wake
if self._stopped:
return
try:
if not self._target():
self._stopped = True
return
except Exception:
self._stopped = True
# NOTE: this is an implementation difference from the real
# PeriodicExecutor. the real one ends up killing the thread, while
            # this one propagates to the IOLoop handler.
raise
iotimeout = time.time() + self._interval
self._next_timeout = self._io_loop.add_timeout(iotimeout,
self._execute)
atexit.register(GreenletPeriodicExecutor._shutdown_executors)
class GreenletLock(object):
    # we need to replace the internal lock to avoid the following scenario:
# greenlet 1:
# with lock:
# # do some io-blocking action, context switch to greenlet 2
#
# greenlet 2:
# with lock: # deadlock
#
# we can't just replace it with an RLock:
# greenlet 1:
# with lock:
# # do some action only one thread of control is expected
# # do some io-blocking action, context switch to greenlet 2
# greenlet 2:
# with lock:
# # lock is granted, potentially corrupting state for greenlet 1
# don't need to be too fancy or thread-safe because it's only coroutines
def __init__(self, io_loop):
# not an rlock, so we don't need to keep track of the holder,
# but might as well for sanity-checking
self.holder = None
self.waiters = []
self.io_loop = io_loop
def acquire(self, blocking=True):
current = greenlet.getcurrent()
parent = current.parent
assert parent, "Must be called on child greenlet"
while self.holder:
if blocking:
self.waiters.append(current)
parent.switch()
else:
return False
        self.holder = current
        return True
def release(self):
current = greenlet.getcurrent()
assert self.holder is current, 'must be held'
self.holder = None
if self.waiters:
waiter = self.waiters.pop(0)
self.io_loop.add_callback(waiter.switch)
def __enter__(self):
self.acquire()
def __exit__(self, *args):
self.release()
class GreenletCondition(object):
# replacement class for threading.Condition
# only implements the methods used by pymongo.
def __init__(self, io_loop, lock):
self.lock = lock
self.waiters = []
self.waiter_timeouts = {}
self.io_loop = io_loop
def _handle_timeout(self, timeout_gr):
self.waiters.remove(timeout_gr)
self.waiter_timeouts.pop(timeout_gr)
timeout_gr.switch()
def wait(self, timeout=None):
current = greenlet.getcurrent()
parent = current.parent
assert parent, "Must be called on child greenlet"
assert self.lock.holder is current, 'must hold lock'
# yield back to the IOLoop
self.waiters.append(current)
if timeout:
callback = functools.partial(self._handle_timeout, current)
iotimeout = timeout + time.time()
self.waiter_timeouts[current] = self.io_loop.add_timeout(iotimeout,
callback)
self.lock.release()
# we'll be returned to by the timeout or by notify_all
parent.switch()
self.lock.acquire()
def notify_all(self):
current = greenlet.getcurrent()
assert self.lock.holder is current, 'must hold lock'
waiters, self.waiters = self.waiters, []
for waiter in waiters:
self.io_loop.add_callback(waiter.switch)
if waiter in self.waiter_timeouts:
timeout = self.waiter_timeouts.pop(waiter)
self.io_loop.remove_timeout(timeout)
class GreenletClient(object):
client = None
@classmethod
def sync_connect(cls, *args, **kwargs):
"""
Makes a synchronous connection to pymongo using Greenlets
Fire up the IOLoop to do the connect, then stop it.
"""
assert not greenlet.getcurrent().parent, "must be run on root greenlet"
def _inner_connect(io_loop, *args, **kwargs):
# asynchronously create a MongoClient using our IOLoop
try:
kwargs['use_greenlets'] = False
kwargs['_pool_class'] = GreenletPool
kwargs['_event_class'] = functools.partial(GreenletEvent,
io_loop)
cls.client = pymongo.mongo_client.MongoClient(*args, **kwargs)
            except Exception:
logging.exception("Failed to connect to MongoDB")
finally:
io_loop.stop()
# clear cls.client so we can't return an old one
if cls.client is not None:
try:
# manually close old unused connection
cls.client.close()
            except Exception:
logging.exception("Clearing old pymongo connection")
cls.client = None
# do the connection
io_loop = ioloop.IOLoop.instance()
conn_gr = greenlet.greenlet(_inner_connect)
# run the connect when the ioloop starts
io_loop.add_callback(functools.partial(conn_gr.switch,
io_loop, *args, **kwargs))
# start the ioloop
io_loop.start()
return cls.client
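# Illustrative usage (hypothetical host/port; not executed here): sync_connect
# must run on the root greenlet; it spins the IOLoop just long enough to build
# the client and returns a connected MongoClient, or None on failure.
#
#     client = GreenletClient.sync_connect('localhost', 27017)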
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and Benchmarks for Densenet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.densenet import densenet
from tensorflow.python.client import device_lib
class DensenetTest(tf.test.TestCase):
def test_bottleneck_true(self):
depth = 7
growth_rate = 2
num_blocks = 3
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_bottleneck_false(self):
depth = 7
growth_rate = 2
num_blocks = 3
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=False, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_pool_initial_true(self):
depth = 7
growth_rate = 2
num_blocks = 4
output_classes = 10
num_layers_in_each_block = [1, 2, 2, 1]
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_regularization(self):
if tf.test.is_gpu_available():
rand_input = tf.random_uniform((10, 3, 32, 32))
data_format = 'channels_first'
else:
rand_input = tf.random_uniform((10, 32, 32, 3))
data_format = 'channels_last'
weight_decay = 1e-4
conv = tf.keras.layers.Conv2D(
3, (3, 3),
padding='same',
use_bias=False,
data_format=data_format,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))
optimizer = tf.train.GradientDescentOptimizer(0.1)
conv(rand_input) # Initialize the variables in the layer
def compute_true_l2(vs, wd):
return tf.reduce_sum(tf.square(vs)) * wd
true_l2 = compute_true_l2(conv.variables, weight_decay)
keras_l2 = tf.add_n(conv.losses)
self.assertAllClose(true_l2, keras_l2)
with tf.GradientTape() as tape_true, tf.GradientTape() as tape_keras:
loss = tf.reduce_sum(conv(rand_input))
loss_with_true_l2 = loss + compute_true_l2(conv.variables, weight_decay)
loss_with_keras_l2 = loss + tf.add_n(conv.losses)
true_grads = tape_true.gradient(loss_with_true_l2, conv.variables)
keras_grads = tape_keras.gradient(loss_with_keras_l2, conv.variables)
self.assertAllClose(true_grads, keras_grads)
optimizer.apply_gradients(zip(keras_grads, conv.variables))
keras_l2_after_update = tf.add_n(conv.losses)
self.assertNotAllClose(keras_l2, keras_l2_after_update)
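def _l2_identity_sketch():
  """Hedged numeric check (illustrative, not a test) of the identity the
  regularization test above relies on: tf.keras.regularizers.l2(wd)
  contributes wd * sum(w ** 2) per variable, which is exactly what
  compute_true_l2 computes."""
  import numpy as np
  w = np.array([1., -2., 3.])
  wd = 1e-4
  assert np.isclose(wd * np.sum(w ** 2), wd * 14.)  # 1 + 4 + 9 == 14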
def compute_gradients(model, images, labels):
with tf.GradientTape() as tape:
logits = model(images, training=True)
cross_ent = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
regularization = tf.add_n(model.losses)
loss = cross_ent + regularization
tf.contrib.summary.scalar(name='loss', tensor=loss)
return tape.gradient(loss, model.variables)
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
def device_and_data_format():
return ('/gpu:0',
'channels_first') if tf.test.is_gpu_available() else ('/cpu:0',
'channels_last')
def random_batch(batch_size, data_format):
shape = (3, 224, 224) if data_format == 'channels_first' else (224, 224, 3)
shape = (batch_size,) + shape
num_classes = 1000
images = tf.random_uniform(shape)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
one_hot = tf.one_hot(labels, num_classes)
return images, one_hot
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class DensenetBenchmark(tf.test.Benchmark):
def __init__(self):
self.depth = 121
self.growth_rate = 32
self.num_blocks = 4
self.output_classes = 1000
self.num_layers_in_each_block = [6, 12, 24, 16]
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
if 'K20' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
return (32,)
return (16, 32)
def _report(self, label, start, num_iters, device, batch_size, data_format):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _force_device_sync(self):
# If this function is called in the context of a non-CPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
# which forces a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def _benchmark_eager_apply(self, label, device_and_format, defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
if defun:
# TODO(apassos) enable tfe.function here
model.call = tfe.defun(model.call)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = random_batch(batch_size, data_format)
for _ in xrange(num_burn):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply('eager_apply', device_and_data_format(),
defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
'eager_apply_async', device_and_data_format(), defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_apply_with_defun(self):
self._benchmark_eager_apply('eager_apply_with_defun',
device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size, data_format)
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
optimizer = tf.train.GradientDescentOptimizer(0.1)
apply_grads = apply_gradients
if defun:
model.call = tfe.defun(model.call)
apply_grads = tfe.defun(apply_gradients)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in xrange(num_burn):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train('eager_train', MockIterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
'eager_train_async',
MockIterator,
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_train_with_defun(self):
self._benchmark_eager_train(
'eager_train_with_defun', MockIterator,
device_and_data_format(), defun=True)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset', make_iterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset_with_defun', make_iterator,
device_and_data_format(), defun=True)
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()
|
|
from __future__ import absolute_import
__author__ = 'katharine'
from six.moves import range
from six import iteritems
import bz2
import errno
import json
import logging
import os
import os.path
import platform
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
from libpebble2.communication.transports.websocket import WebsocketTransport
from libpebble2.exceptions import ConnectionError
from pebble_tool.account import get_default_account
from pebble_tool.exceptions import MissingEmulatorError, ToolError
from pebble_tool.util.analytics import post_event
from . import sdk_path, get_sdk_persist_dir, sdk_manager
logger = logging.getLogger("pebble_tool.sdk.emulator")
black_hole = open(os.devnull, 'w')
def get_emulator_info_path():
return os.path.join(tempfile.gettempdir(), 'pb-emulator.json')
def get_all_emulator_info():
try:
with open(get_emulator_info_path()) as f:
return json.load(f)
except (OSError, IOError):
return {}
def get_emulator_info(platform, version=None):
info = get_all_emulator_info().get(platform, None)
# If we have nothing for the platform, it's None
if info is None:
return None
# If a specific version was requested, return that directly.
if version is not None:
return info.get(version, None)
# If a version wasn't requested, look for one that's alive.
# If exactly one is alive, return that.
alive = []
for sdk_version, sdk_info in iteritems(info):
if ManagedEmulatorTransport.is_emulator_alive(platform, sdk_version):
alive.append(sdk_version)
else:
# Clean up dead entries that are left hanging around.
update_emulator_info(platform, sdk_version, None)
if len(alive) > 1:
raise ToolError("There are multiple {} emulators (versions {}) running. You must specify a version."
.format(platform, ', '.join(alive)))
elif len(alive) == 0:
return None
else:
return info[alive[0]]
def update_emulator_info(platform, version, new_content):
try:
with open(get_emulator_info_path()) as f:
content = json.load(f)
except (OSError, IOError):
content = {}
if new_content is None:
        # drop the entry for this version, tolerating its absence
        content.get(platform, {}).pop(version, None)
else:
content.setdefault(platform, {})[version] = new_content
with open(get_emulator_info_path(), 'w') as f:
json.dump(content, f, indent=4)
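# For reference, the pb-emulator.json state file written above nests
# platform -> SDK version -> process info (values hypothetical):
#
#     {
#         "basalt": {
#             "4.3": {
#                 "qemu": {"pid": 1234, "port": 12344, "serial": 12345, "gdb": 12346},
#                 "pypkjs": {"pid": 1235, "port": 12347},
#                 "version": "4.3"
#             }
#         }
#     }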
class ManagedEmulatorTransport(WebsocketTransport):
def __init__(self, platform, version=None):
self.platform = platform
self.version = version
self._find_ports()
super(ManagedEmulatorTransport, self).__init__('ws://localhost:{}/'.format(self.pypkjs_port))
def connect(self):
self._spawn_processes()
for i in range(10):
time.sleep(0.5)
try:
super(ManagedEmulatorTransport, self).connect()
except ConnectionError:
continue
else:
return
super(ManagedEmulatorTransport, self).connect()
def _find_ports(self):
info = get_emulator_info(self.platform, self.version)
qemu_running = False
if info is not None:
self.version = info['version']
if self._is_pid_running(info['qemu']['pid']):
qemu_running = True
self.qemu_port = info['qemu']['port']
self.qemu_serial_port = info['qemu']['serial']
self.qemu_pid = info['qemu']['pid']
self.qemu_gdb_port = info['qemu'].get('gdb', None)
else:
self.qemu_pid = None
if self._is_pid_running(info['pypkjs']['pid']):
if qemu_running:
self.pypkjs_port = info['pypkjs']['port']
self.pypkjs_pid = info['pypkjs']['pid']
else:
logger.info("pypkjs is alive, but qemu is not, so we're killing it.")
os.kill(info['pypkjs']['pid'], signal.SIGKILL)
self.pypkjs_pid = None
else:
self.pypkjs_pid = None
else:
self.qemu_pid = None
self.pypkjs_pid = None
if self.qemu_pid is None:
self.qemu_port = self._choose_port()
self.qemu_serial_port = self._choose_port()
self.qemu_gdb_port = self._choose_port()
if self.pypkjs_pid is None:
self.pypkjs_port = self._choose_port()
def _spawn_processes(self):
if self.version is None:
sdk_path() # Force an SDK to be installed.
self.version = sdk_manager.get_current_sdk()
if self.qemu_pid is None:
logger.info("Spawning QEMU.")
self._spawn_qemu()
else:
logger.info("QEMU is already running.")
if self.pypkjs_pid is None:
logger.info("Spawning pypkjs.")
self._spawn_pypkjs()
else:
logger.info("pypkjs is already running.")
self._save_state()
def _save_state(self):
d = {
'qemu': {
'pid': self.qemu_pid,
'port': self.qemu_port,
'serial': self.qemu_serial_port,
'gdb': self.qemu_gdb_port,
},
'pypkjs': {
'pid': self.pypkjs_pid,
'port': self.pypkjs_port,
},
'version': self.version,
}
update_emulator_info(self.platform, self.version, d)
def _spawn_qemu(self):
qemu_bin = os.environ.get('PEBBLE_QEMU_PATH', 'qemu-pebble')
qemu_micro_flash = os.path.join(sdk_manager.path_for_sdk(self.version), 'pebble', self.platform, 'qemu',
"qemu_micro_flash.bin")
qemu_spi_flash = self._get_spi_path()
for path in (qemu_micro_flash, qemu_spi_flash):
if not os.path.exists(path):
raise MissingEmulatorError("Can't launch emulator: missing required file at {}".format(path))
command = [
qemu_bin,
"-rtc", "base=localtime",
"-serial", "null",
"-serial", "tcp::{},server,nowait".format(self.qemu_port),
"-serial", "tcp::{},server".format(self.qemu_serial_port),
"-pflash", qemu_micro_flash,
"-gdb", "tcp::{},server,nowait".format(self.qemu_gdb_port),
]
platform_args = {
'emery': [
'-machine', 'pebble-robert-bb',
'-cpu', 'cortex-m4',
'-pflash', qemu_spi_flash,
],
'diorite': [
'-machine', 'pebble-silk-bb',
'-cpu', 'cortex-m4',
'-mtdblock', qemu_spi_flash,
],
'chalk': [
'-machine', 'pebble-s4-bb',
'-cpu', 'cortex-m4',
'-pflash', qemu_spi_flash,
],
'basalt': [
'-machine', 'pebble-snowy-bb',
'-cpu', 'cortex-m4',
'-pflash', qemu_spi_flash,
],
'aplite': [
'-machine', 'pebble-bb2',
'-cpu', 'cortex-m3',
'-mtdblock', qemu_spi_flash,
]
}
command.extend(platform_args[self.platform])
logger.info("Qemu command: %s", subprocess.list2cmdline(command))
process = subprocess.Popen(command, stdout=self._get_output(), stderr=self._get_output())
time.sleep(0.2)
if process.poll() is not None:
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise MissingEmulatorError("Couldn't launch emulator:\n{}".format(e.output.strip()))
self.qemu_pid = process.pid
self._wait_for_qemu()
def _wait_for_qemu(self):
logger.info("Waiting for the firmware to boot.")
for i in range(20):
time.sleep(0.2)
try:
s = socket.create_connection(('localhost', self.qemu_serial_port))
except socket.error:
logger.debug("QEMU not ready yet.")
else:
break
else:
post_event("qemu_launched", success=False, reason="qemu_launch_timeout")
raise ToolError("Emulator launch timed out.")
received = b''
while True:
try:
received += s.recv(256)
except socket.error as e:
# Ignore "Interrupted system call"
if e.errno != errno.EINTR:
raise
if b"<SDK Home>" in received or b"<Launcher>" in received or b"Ready for communication" in received:
break
s.close()
post_event("qemu_launched", success=True)
logger.info("Firmware booted.")
def _copy_spi_image(self, path):
sdk_qemu_spi_flash = os.path.join(sdk_path(), 'pebble', self.platform, 'qemu', 'qemu_spi_flash.bin.bz2')
if not os.path.exists(sdk_qemu_spi_flash):
raise MissingEmulatorError("Your SDK does not support the Pebble Emulator.")
else:
try:
os.makedirs(os.path.dirname(path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Copy the compressed file.
with bz2.BZ2File(sdk_qemu_spi_flash) as from_file:
with open(path, 'wb') as to_file:
while True:
data = from_file.read(512)
if not data:
break
to_file.write(data)
def _get_spi_path(self):
platform = self.platform
if sdk_manager.get_current_sdk() == 'tintin':
sdk_qemu_spi_flash = os.path.join(sdk_manager.path_for_sdk(self.version), 'pebble', platform, 'qemu',
'qemu_spi_flash.bin')
return sdk_qemu_spi_flash
path = os.path.join(get_sdk_persist_dir(platform, self.version), 'qemu_spi_flash.bin')
if not os.path.exists(path):
self._copy_spi_image(path)
return path
def _spawn_pypkjs(self):
phonesim_bin = os.environ.get('PHONESIM_PATH', 'phonesim.py')
layout_file = os.path.join(sdk_manager.path_for_sdk(self.version), 'pebble', self.platform, 'qemu',
"layouts.json")
command = [
sys.executable,
phonesim_bin,
"--qemu", "localhost:{}".format(self.qemu_port),
"--port", str(self.pypkjs_port),
"--persist", get_sdk_persist_dir(self.platform, self.version),
"--layout", layout_file,
]
account = get_default_account()
if account.is_logged_in:
command.extend(['--oauth', account.bearer_token])
if logger.getEffectiveLevel() <= logging.DEBUG:
command.append('--debug')
logger.info("pypkjs command: %s", subprocess.list2cmdline(command))
process = subprocess.Popen(command, stdout=self._get_output(), stderr=self._get_output())
time.sleep(0.5)
if process.poll() is not None:
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise MissingEmulatorError("Couldn't launch pypkjs:\n{}".format(e.output.strip()))
self.pypkjs_pid = process.pid
def _get_output(self):
if logger.getEffectiveLevel() <= logging.DEBUG:
return None
else:
return black_hole
@classmethod
def _choose_port(cls):
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
@classmethod
def _is_pid_running(cls, pid):
# PBL-21228: This isn't going to work on Windows.
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == 3:
return False
else:
raise
return True
@classmethod
def is_emulator_alive(cls, platform, version=None):
info = get_emulator_info(platform, version or sdk_manager.get_current_sdk())
if info is None:
return False
        return cls._is_pid_running(info['qemu']['pid']) and cls._is_pid_running(info['pypkjs']['pid'])
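# Illustrative usage sketch (hypothetical; nothing in this module calls it):
# constructing the transport discovers or reserves ports for QEMU and pypkjs,
# and connect() spawns any missing processes before opening the websocket.
#
#     transport = ManagedEmulatorTransport('basalt')
#     transport.connect()  # boots QEMU and pypkjs on first use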
|
|
import sys
import inspect
from collections import namedtuple
from functools import update_wrapper
if sys.version_info < (3,): # Python 2
from httplib import responses as http_reasons
from cStringIO import StringIO as BytesIO
from urlparse import urlparse, parse_qsl
def _exec(code, g):
exec('exec code in g')
else: # Python 3
from http.client import responses as http_reasons
from io import BytesIO
from urllib.parse import urlparse, parse_qsl
_exec = getattr(__import__('builtins'), 'exec')
unicode = str
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
Call = namedtuple('Call', ['request', 'response'])
Request = namedtuple('Request', ['method', 'url', 'body', 'headers',
'scheme', 'host', 'port'])
_urllib3_import = """\
from %(package)s.response import HTTPResponse
from %(package)s.exceptions import ProtocolError
"""
_wrapper_template = """\
def wrapper%(signature)s:
with responses:
return func%(funcargs)s
"""
__all__ = ['Responses']
def get_wrapped(func, wrapper_template, evaldict):
# Preserve the argspec for the wrapped function so that testing
# tools such as pytest can continue to use their fixture injection.
args, a, kw, defaults = inspect.getargspec(func)
signature = inspect.formatargspec(args, a, kw, defaults)
is_bound_method = hasattr(func, '__self__')
if is_bound_method:
args = args[1:] # Omit 'self'
callargs = inspect.formatargspec(args, a, kw, None)
ctx = {'signature': signature, 'funcargs': callargs}
_exec(wrapper_template % ctx, evaldict)
wrapper = evaldict['wrapper']
update_wrapper(wrapper, func)
if is_bound_method:
wrapper = wrapper.__get__(func.__self__, type(func.__self__))
return wrapper
class _FakeHeaders(list):
def get_all(self, key, default=None):
key = key.lower()
return [v for (k, v) in self if k.lower() == key]
getheaders = get_all
class _FakeResponse(object):
def __init__(self, headers):
self.msg = _FakeHeaders(headers)
def isclosed(self):
return False
class CallList(list):
def add(self, request, response):
self.append(Call(request, response))
class Responses(object):
ANY = mock.ANY
DELETE = 'DELETE'
GET = 'GET'
HEAD = 'HEAD'
OPTIONS = 'OPTIONS'
PATCH = 'PATCH'
POST = 'POST'
PUT = 'PUT'
def __init__(self, package='urllib3'):
evaldict = {}
_exec(_urllib3_import % {'package': package}, evaldict)
self._package = package
self._request_class = Request
self._response_class = evaldict['HTTPResponse']
self._error_class = evaldict['ProtocolError']
self.reset()
def reset(self):
self._urls = []
self._calls = CallList()
def add(self, method, url, body='', match_querystring=False,
status=200, adding_headers=None,
content_type='text/plain'):
# body must be bytes
if isinstance(body, unicode):
body = body.encode('utf-8')
self._urls.append({
'url': url,
'method': method,
'return': (status, adding_headers, body),
'content_type': content_type,
'match_querystring': match_querystring,
})
def add_callback(self, method, url, callback, match_querystring=False,
content_type='text/plain'):
self._urls.append({
'url': url,
'method': method,
'callback': callback,
'content_type': content_type,
'match_querystring': match_querystring,
})
@property
def calls(self):
return self._calls
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop()
self.reset()
def activate(self, func):
evaldict = {'responses': self, 'func': func}
return get_wrapped(func, _wrapper_template, evaldict)
def _find_match(self, request):
for match in self._urls:
if request.method == match['method'] and \
self._has_url_match(match, request.url):
return match
def _has_url_match(self, match, request_url):
url = match['url']
if hasattr(url, 'match'):
return url.match(request_url)
if match['match_querystring']:
return self._has_strict_url_match(url, request_url)
return url == request_url.partition('?')[0]
def _has_strict_url_match(self, url, other):
url_parsed = urlparse(url)
other_parsed = urlparse(other)
if url_parsed[:3] != other_parsed[:3]:
return False
url_qsl = sorted(parse_qsl(url_parsed.query))
other_qsl = sorted(parse_qsl(other_parsed.query))
return url_qsl == other_qsl
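    # For example (hypothetical URLs): because parse_qsl output is sorted,
    # _has_strict_url_match treats parameter order as irrelevant, so
    # 'http://h/p?a=1&b=2' matches 'http://h/p?b=2&a=1', while any difference
    # in scheme, host, path, or parameters fails the match.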
def _urlopen(self, pool, method, url, body=None, headers=None, **kwargs):
request = self._request_class(method, url, body, headers,
pool.scheme, pool.host, pool.port)
match = self._find_match(request)
if match is None:
error_msg = 'Connection refused: {0} {1}'.format(request.method,
request.url)
response = self._error_class(error_msg)
self._calls.add(request, response)
raise response
headers = [
('Content-Type', match['content_type']),
]
if 'callback' in match: # use callback
status, r_headers, body = match['callback'](request)
if isinstance(body, unicode):
body = body.encode('utf-8')
else:
status, r_headers, body = match['return']
if isinstance(body, Exception):
self._calls.add(request, body)
raise body
if hasattr(status, 'split'):
status, reason = status.split(None, 1)
status = int(status)
else:
reason = http_reasons.get(status)
if r_headers:
if hasattr(r_headers, 'items'):
r_headers = r_headers.items()
for key, value in r_headers:
if key.lower() == 'content-type':
if headers[0][0].lower() == 'content-type':
del headers[0] # No duplicate content_type
headers.append((key, value))
response = self._response_class(
status=status,
reason=reason,
body=BytesIO(body) if body else BytesIO(),
headers=headers,
preload_content=False,
original_response=_FakeResponse(headers),
)
self._calls.add(request, response)
return response
def start(self):
def _urlopen(pool, method, url, body=None, headers=None, **kwargs):
return self._urlopen(pool, method, url, body=body, headers=headers,
**kwargs)
target = self._package + '.connectionpool.HTTPConnectionPool.urlopen'
self._patcher = mock.patch(target, _urlopen)
self._patcher.start()
def stop(self):
self._patcher.stop()
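# Hedged usage sketch (illustrative only; assumes urllib3 is installed and
# that URLs are registered as request paths, as the matcher above compares
# them): every HTTPConnectionPool.urlopen call inside the context is served
# from the registered responses and recorded in .calls.
def _responses_usage_sketch():
    import urllib3
    responses = Responses()
    responses.add('GET', '/api', body='{"ok": true}',
                  content_type='application/json')
    with responses:
        pool = urllib3.HTTPConnectionPool('example.com')
        r = pool.urlopen('GET', '/api')
        assert r.status == 200
        assert responses.calls[0].request.url == '/api'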
|
|
from crowdsourcing.forms import *
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from rest_framework import views as rest_framework_views
from rest_framework.views import APIView
from rest_framework.renderers import JSONRenderer
from crowdsourcing.serializers.user import *
from crowdsourcing.serializers.project import *
from crowdsourcing.utils import *
from crowdsourcing.models import *
from rest_framework.exceptions import AuthenticationFailed
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
class Logout(APIView):
def post(self, request, *args, **kwargs):
from django.contrib.auth import logout
logout(request)
return Response({}, status=status.HTTP_204_NO_CONTENT)
class Login(APIView):
    @method_decorator(csrf_protect)
def post(self, request, *args, **kwargs):
from django.contrib.auth import authenticate as auth_authenticate, login
# self.redirect_to = request.POST.get('next', '') #to be changed, POST does not contain any data
username = request.data.get('username', '')
password = request.data.get('password', '')
email_or_username = username
# match with username if not email
if not re.match(r"[^@]+@[^@]+\.[^@]+", email_or_username):
username = email_or_username
else:
user = get_model_or_none(User, email=email_or_username)
if user is not None:
username = user.username
user = auth_authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
response_data = dict()
response_data["username"] = user.username
response_data["email"] = user.email
response_data["first_name"] = user.first_name
response_data["last_name"] = user.last_name
response_data["date_joined"] = user.date_joined
response_data["last_login"] = user.last_login
return Response(response_data, status.HTTP_200_OK)
else:
raise AuthenticationFailed(_('Account is not activated yet.'))
else:
raise AuthenticationFailed(_('Username or password is incorrect.'))
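# Illustrative request/response shape for the login endpoint above (URL and
# values hypothetical):
#   POST /api/auth/login  {"username": "alice@example.com", "password": "..."}
#   200 -> {"username": ..., "email": ..., "first_name": ..., "last_name": ...,
#           "date_joined": ..., "last_login": ...}
#   401 -> AuthenticationFailed detail for inactive accounts or bad credentials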
class ForgotPassword(rest_framework_views.APIView):
"""
This takes care of the forgot password process.
"""
'''
def get_context_data(self, **kwargs):
context = super(ForgotPassword,self).get_context_data(**kwargs)
context['form'] = ForgotPasswordForm(self.request.POST or None)
return context
'''
def get(self, request, *args, **kwargs):
'''
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
'''
return Response({"status":"OK"}, status=status.HTTP_200_OK)
def post(self, request, *args, **kwargs):
"""
Here we process the POST and if the form is valid (i.e email is valid)
then we send a password reset link to the user.
"""
email = json.loads(request.body.decode('utf-8')).get('email','')
form = ForgotPasswordForm()
form.email = email
#temporary check, will be done properly
try:
form.clean()
except forms.ValidationError:
return Response({
'status': 'Error',
'message': 'Invalid email entered.'
}, status=status.HTTP_404_NOT_FOUND)
from crowdsourcing.models import PasswordResetModel
user = User.objects.get(email=email)
salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
username = user.username
reset_key = hashlib.sha1(str(salt+username).encode('utf-8')).hexdigest()
password_reset = PasswordResetModel()
password_reset.user = user
password_reset.reset_key = reset_key
if settings.EMAIL_ENABLED:
password_reset.save()
self.send_password_reset_email(email=email, host=request.get_host(), reset_key=reset_key)
return Response({
'status': 'Success',
'message': 'Email sent.'
}, status=status.HTTP_201_CREATED)
#return render(request,'registration/password_reset_email_sent.html')
#context['form'] = form
#return self.render_to_response(context)
#TODO timer for the reset key
#TODO HTML templates should be moved to files
    def send_password_reset_email(self, email, host, reset_key):
        """
        Sends the password reset email to the user; this will be moved to a
        dedicated class in the future so that all emails are processed in one place.
        The email includes two links: one for changing the password and one for
        discarding the forgot-password request.
        """
from django.core.mail import EmailMultiAlternatives
subject, from_email, to = 'Crowdsourcing Password Reset', settings.EMAIL_SENDER, email
reset_url = 'https://'+ host + '/reset-password/' +reset_key
text_content = 'Hello, \n ' \
'Please reset your password using the following link: \n' + reset_url+'/1' \
'\nIf you did not request a password reset please click the following link: ' +reset_url+'/0' \
'\nGreetings, \nCrowdsourcing Team'
html_content = '<h3>Hello,</h3>' \
'<p>Please reset your password using the following link: <br>' \
'<a href="'+reset_url+'/1'+'">'+reset_url+'/1'+'</a></p>' \
"<br><p>If you didn't request a password reset please click the following link: <br>" + '' \
'<a href="'+reset_url+'/0'+'">'+reset_url+'/0'+'</a><br><br> Greetings,<br> <strong>Crowdsourcing Team</strong>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
class Oauth2TokenView(rest_framework_views.APIView):
def post(self, request, *args, **kwargs):
oauth2_login = Oauth2Utils()
response_data, oauth2_status = oauth2_login.get_token(request)
return Response(response_data,status=oauth2_status)
#Will be moved to Class Views
#################################################
def registration_successful(request):
return render(request, 'registration/registration_successful.html')
def home(request):
return render(request, 'base/index.html')
def activate_account(request, activation_key):
"""
    This handles account activation after the user follows the link from their email.
"""
from django.contrib.auth.models import User
try:
activate_user = models.RegistrationModel.objects.get(activation_key=activation_key)
if activate_user:
usr = User.objects.get(id=activate_user.user_id)
usr.is_active = 1
usr.save()
activate_user.delete()
return render(request,'registration/registration_complete.html')
    except Exception:
return HttpResponseRedirect('/')
#TODO check expired keys
def reset_password(request, reset_key, enable):
"""
Resets the user password if requested from the user.
"""
from crowdsourcing.models import PasswordResetModel
#this must be changed for angular support
form = PasswordResetForm(request.POST or None)
if enable == "1":
pass
#return render(request, 'registration/ignore_password_reset.html')
elif enable == "0":
try:
password_reset = PasswordResetModel.objects.get(reset_key=reset_key)
password_reset.delete()
        except Exception:
pass
return render(request, 'registration/ignore_password_reset.html')
if request.method == 'POST' and form.is_valid():
#try:
password_reset = PasswordResetModel.objects.get(reset_key=reset_key)
user = User.objects.get(id = password_reset.user_id)
user.set_password(request.POST['password1'])
user.save()
password_reset.delete()
return render(request, 'registration/password_reset_successful.html')
return render(request, 'registration/reset_password.html',{'form':form})
#################################################
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.background_jobs import enqueue
from frappe.utils import get_url, get_datetime
from frappe.desk.form.utils import get_pdf_link
from frappe.utils.verified_command import get_signed_params, verify_request
from frappe import _
from frappe.model.workflow import apply_workflow, get_workflow_name, \
has_approval_access, get_workflow_state_field, send_email_alert
from frappe.desk.notifications import clear_doctype_notifications
class WorkflowAction(Document):
pass
def get_permission_query_conditions(user):
if not user: user = frappe.session.user
if user == "Administrator": return ""
return "(`tabWorkflow Action`.user='{user}')".format(user=user)
def has_permission(doc, user):
if user not in ['Administrator', doc.user]:
return False
def process_workflow_actions(doc, state):
workflow = get_workflow_name(doc.get('doctype'))
if not workflow: return
if is_workflow_action_already_created(doc): return
clear_old_workflow_actions(doc)
update_completed_workflow_actions(doc)
clear_doctype_notifications('Workflow Action')
next_possible_transitions = get_next_possible_transitions(workflow, get_doc_workflow_state(doc))
if not next_possible_transitions: return
user_data_map = get_users_next_action_data(next_possible_transitions, doc)
if not user_data_map: return
create_workflow_actions_for_users(user_data_map.keys(), doc)
if send_email_alert(workflow):
enqueue(send_workflow_action_email, queue='short', users_data=list(user_data_map.values()), doc=doc)
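# Illustrative end-to-end flow of process_workflow_actions (comment sketch,
# hypothetical state name): when a doc moves to state "Pending Approval",
# stale Open actions held by other users are deleted, the acting user's action
# is marked Completed, one Open Workflow Action is created per user allowed to
# perform a next transition, and, if the workflow enables email alerts, each
# of those users is emailed their action links in the background.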
@frappe.whitelist(allow_guest=True)
def apply_action(action, doctype, docname, current_state, user=None, last_modified=None):
if not verify_request():
return
doc = frappe.get_doc(doctype, docname)
doc_workflow_state = get_doc_workflow_state(doc)
if doc_workflow_state == current_state:
action_link = get_confirm_workflow_action_url(doc, action, user)
if not last_modified or get_datetime(doc.modified) == get_datetime(last_modified):
return_action_confirmation_page(doc, action, action_link)
else:
return_action_confirmation_page(doc, action, action_link, alert_doc_change=True)
else:
return_link_expired_page(doc, doc_workflow_state)
@frappe.whitelist(allow_guest=True)
def confirm_action(doctype, docname, user, action):
if not verify_request():
return
logged_in_user = frappe.session.user
if logged_in_user == 'Guest' and user:
# to allow user to apply action without login
frappe.set_user(user)
doc = frappe.get_doc(doctype, docname)
newdoc = apply_workflow(doc, action)
frappe.db.commit()
return_success_page(newdoc)
# reset session user
frappe.set_user(logged_in_user)
def return_success_page(doc):
frappe.respond_as_web_page(_("Success"),
_("{0}: {1} is set to state {2}".format(
doc.get('doctype'),
frappe.bold(doc.get('name')),
frappe.bold(get_doc_workflow_state(doc))
)), indicator_color='green')
def return_action_confirmation_page(doc, action, action_link, alert_doc_change=False):
template_params = {
'title': doc.get('name'),
'doctype': doc.get('doctype'),
'docname': doc.get('name'),
'action': action,
'action_link': action_link,
'alert_doc_change': alert_doc_change
}
template_params['pdf_link'] = get_pdf_link(doc.get('doctype'), doc.get('name'))
frappe.respond_as_web_page(None, None,
indicator_color="blue",
template="confirm_workflow_action",
context=template_params)
def return_link_expired_page(doc, doc_workflow_state):
frappe.respond_as_web_page(_("Link Expired"),
_("Document {0} has been set to state {1} by {2}"
.format(
frappe.bold(doc.get('name')),
frappe.bold(doc_workflow_state),
frappe.bold(frappe.get_value('User', doc.get("modified_by"), 'full_name'))
)), indicator_color='blue')
def clear_old_workflow_actions(doc, user=None):
user = user if user else frappe.session.user
frappe.db.sql('''delete from `tabWorkflow Action`
where reference_doctype=%s and reference_name=%s and user!=%s and status="Open"''',
(doc.get('doctype'), doc.get('name'), user))
def update_completed_workflow_actions(doc, user=None):
user = user if user else frappe.session.user
frappe.db.sql('''update `tabWorkflow Action` set status='Completed', completed_by=%s
where reference_doctype=%s and reference_name=%s and user=%s and status="Open"''',
(user, doc.get('doctype'), doc.get('name'), user))
def get_next_possible_transitions(workflow_name, state):
return frappe.get_all('Workflow Transition',
fields=['allowed', 'action', 'state', 'allow_self_approval'],
filters=[['parent', '=', workflow_name],
['state', '=', state]])
def get_users_next_action_data(transitions, doc):
user_data_map = {}
for transition in transitions:
users = get_users_with_role(transition.allowed)
filtered_users = filter_allowed_users(users, doc, transition)
for user in filtered_users:
if not user_data_map.get(user):
user_data_map[user] = {
'possible_actions': [],
'email': frappe.db.get_value('User', user, 'email'),
}
user_data_map[user].get('possible_actions').append({
'action_name': transition.action,
'action_link': get_workflow_action_url(transition.action, doc, user)
})
return user_data_map
def create_workflow_actions_for_users(users, doc):
for user in users:
frappe.get_doc({
'doctype': 'Workflow Action',
'reference_doctype': doc.get('doctype'),
'reference_name': doc.get('name'),
'workflow_state': get_doc_workflow_state(doc),
'status': 'Open',
'user': user
}).insert(ignore_permissions=True)
frappe.db.commit()
def send_workflow_action_email(users_data, doc):
common_args = get_common_email_args(doc)
message = common_args.pop('message', None)
for d in users_data:
email_args = {
'recipients': [d.get('email')],
'args': {
'actions': d.get('possible_actions'),
'message': message
},
}
email_args.update(common_args)
enqueue(method=frappe.sendmail, queue='short', **email_args)
def get_workflow_action_url(action, doc, user):
apply_action_method = "/api/method/frappe.workflow.doctype.workflow_action.workflow_action.apply_action"
params = {
"doctype": doc.get('doctype'),
"docname": doc.get('name'),
"action": action,
"current_state": get_doc_workflow_state(doc),
"user": user,
"last_modified": doc.get('modified')
}
return get_url(apply_action_method + "?" + get_signed_params(params))
def get_confirm_workflow_action_url(doc, action, user):
confirm_action_method = "/api/method/frappe.workflow.doctype.workflow_action.workflow_action.confirm_action"
params = {
"action": action,
"doctype": doc.get('doctype'),
"docname": doc.get('name'),
"user": user
}
return get_url(confirm_action_method + "?" + get_signed_params(params))
def get_users_with_role(role):
return [p[0] for p in frappe.db.sql("""select distinct tabUser.name
from `tabHas Role`, tabUser
where `tabHas Role`.role=%s
and tabUser.name != "Administrator"
and `tabHas Role`.parent = tabUser.name
and tabUser.enabled=1""", role)]
def is_workflow_action_already_created(doc):
return frappe.db.exists({
'doctype': 'Workflow Action',
'reference_doctype': doc.get('doctype'),
'reference_name': doc.get('name'),
'workflow_state': get_doc_workflow_state(doc)
})
def get_doc_workflow_state(doc):
workflow_name = get_workflow_name(doc.get('doctype'))
workflow_state_field = get_workflow_state_field(workflow_name)
return doc.get(workflow_state_field)
def filter_allowed_users(users, doc, transition):
"""Filters list of users by checking if user has access to doc and
if the user satisfies 'workflow transision self approval' condition
"""
from frappe.permissions import has_permission
filtered_users = []
for user in users:
if (has_approval_access(user, doc, transition)
and has_permission(doctype=doc, user=user)):
filtered_users.append(user)
return filtered_users
def get_common_email_args(doc):
doctype = doc.get('doctype')
docname = doc.get('name')
email_template = get_email_template(doc)
if email_template:
subject = frappe.render_template(email_template.subject, vars(doc))
response = frappe.render_template(email_template.response, vars(doc))
else:
subject = _('Workflow Action')
		response = _('{0}: {1}').format(doctype, docname)
common_args = {
'template': 'workflow_action',
		'attachments': [frappe.attach_print(doctype, docname, file_name=docname)],
'subject': subject,
'message': response
}
return common_args
def get_email_template(doc):
"""Returns next_action_email_template
for workflow state (if available) based on doc current workflow state
"""
workflow_name = get_workflow_name(doc.get('doctype'))
doc_state = get_doc_workflow_state(doc)
template_name = frappe.db.get_value('Workflow Document State', {
'parent': workflow_name,
'state': doc_state
}, 'next_action_email_template')
if not template_name: return
return frappe.get_doc('Email Template', template_name)
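# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the per-user grouping
# performed by get_users_next_action_data(), reproduced with plain dicts so it
# runs without a Frappe site. The transitions, role map, and placeholder link
# below are hypothetical stand-ins, not real Frappe data or APIs.
if __name__ == '__main__':
	demo_transitions = [
		{'action': 'Approve', 'allowed': 'Approver'},
		{'action': 'Reject', 'allowed': 'Approver'},
	]
	users_by_role = {'Approver': ['a@example.com', 'b@example.com']}
	demo_user_data_map = {}
	for transition in demo_transitions:
		for user in users_by_role.get(transition['allowed'], []):
			data = demo_user_data_map.setdefault(
				user, {'possible_actions': [], 'email': user})
			data['possible_actions'].append({
				'action_name': transition['action'],
				'action_link': '<signed-url-for-{0}>'.format(transition['action']),
			})
	# Each user ends up with a single entry listing every action available to
	# them, which send_workflow_action_email() then turns into one email.
	for user, data in sorted(demo_user_data_map.items()):
		print(user, [a['action_name'] for a in data['possible_actions']])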
|
|
import logging
import urllib
import time
import os
import json
import asyncio
import itertools
from .base import ProviderBase, ProviderSearchResultBase, ProviderError
from ..toolbox import db
from ..toolbox.net import download
from ..toolbox.utils import tostr
from ..config import config
__all__ = ['Provider']
log = logging.getLogger('stagehand.providers.thetvdb')
class ProviderSearchResult(ProviderSearchResultBase):
@property
def pid(self):
return str(self._attrs.get('id'))
@property
def name(self):
return self._attrs.get('seriesName')
@property
def names(self):
yield self.name
@property
def overview(self):
return self._attrs.get('overview')
@property
def imdb(self):
# Not available in search results
return None
@property
def year(self):
started = self.started
if started and len(started.split('-')) == 3:
return started.split('-')[0]
else:
return started
@property
def started(self):
return self._attrs.get('firstAired')
@property
def banner(self):
if 'banner' in self._attrs:
return self.provider.hostname + '/banners/' + self._attrs['banner']
class Provider(ProviderBase):
NAME = 'thetvdb'
NAME_PRINTABLE = 'TheTVDB'
IDATTR = 'thetvdbid'
CACHEATTR = 'thetvdbcache'
# It's actually 24 hours but we trim it a bit just to be safe.
TOKEN_LIFETIME_SECONDS = 23 * 3600
def __init__(self, db):
super().__init__(db)
self.hostname = 'https://www.thetvdb.com'
self._apikey = '1E9534A23E6D7DC0'
self._token = None
self._token_time = 0
db.register_object_type_attrs('series',
thetvdbid = (str, db.ATTR_SEARCHABLE | db.ATTR_INDEXED),
thetvdbcache = (dict, db.ATTR_SIMPLE)
)
db.register_object_type_attrs('episode',
thetvdbid = (str, db.ATTR_SEARCHABLE),
)
@asyncio.coroutine
def _rawapi(self, path, token=None, method='GET', body=None):
url = 'https://api.thetvdb.com' + path
headers = {
'Accept': 'application/json',
'Accept-Language' : config.misc.language.lower()
}
if token:
headers['Authorization'] = 'Bearer ' + token
if body:
headers['Content-Type'] = 'application/json'
status, data = yield from download(url, retry=4, method=method, headers=headers, data=body)
return status, json.loads(data.decode('utf8')) if data else None
@asyncio.coroutine
def _login(self):
body = json.dumps({'apikey': self._apikey})
status, response = yield from self._rawapi('/login', method='POST', body=body)
if status == 200 and 'token' in response:
return response['token']
else:
log.error('thetvdb login failed: %s', response)
raise ProviderError('thetvdb API login failed')
@asyncio.coroutine
def _api(self, path, method='GET', body=None):
"""
Invokes an API method, logging in or refreshing the token if necessary.
"""
now = time.time()
if not self._token or now - self._token_time > Provider.TOKEN_LIFETIME_SECONDS:
# Acquire a new token
self._token = yield from self._login()
self._token_time = now
status, response = yield from self._rawapi(path, self._token, method, body)
log.debug('API %s returned status %d', path, status)
        if status == 401:
            if self._token_time == now:
                raise ProviderError('TheTVDB API refused token')
            else:
                # Token was refused before expiry. Clear it and recurse to
                # force a relogin.
                self._token = None
                status, response = yield from self._api(path, method, body)
elif status != 200:
log.debug('API %s returned status %d', path, status)
return status, response
@asyncio.coroutine
def search(self, name):
results = []
quoted = urllib.parse.quote(name.replace('-', ' ').replace('_', ' '))
log.info('searching TheTVDB for %s', name)
status, response = yield from self._api('/search/series?name=' + quoted)
if status == 200:
if 'data' not in response:
log.warning('data element missing from response')
else:
for result in response['data']:
results.append(ProviderSearchResult(self, result))
return results
@asyncio.coroutine
def get_series(self, id):
log.debug('retrieving series data for %s', id)
if not self.get_last_updated():
# DB doesn't know about server time. Set to current time so that
# subsequent calls to get_changed_series_ids() have a reference
# point.
self.db.set_metadata('thetvdb::servertime', int(time.time()))
series = {'episodes': []}
log.info('fetching series %s from TheTVDB', id)
status, response = yield from self._api('/series/' + id)
if status != 200:
return series
elif 'data' not in response:
log.warning('data element missing from response')
return series
data = response['data']
try:
series['runtime'] = int(data['runtime'])
except (ValueError, KeyError):
pass
try:
            # XXX: is airsTime guaranteed to be well formatted?
            # Should we be more robust?
timetuple = time.strptime(data.get('airsTime', ''), '%I:%M %p')
series['airtime'] = tostr(time.strftime('%H:%M', timetuple))
except ValueError:
pass
# Get any existing series and see if we need to fetch banner data.
# TODO: use /series/{id}/images to pick the highest rated banner
# and fetch the poster as well.
existing = self.db.get_series_by_id('thetvdb:{}'.format(data['id']))
missing = not existing or not existing.banner_data
if missing and data.get('banner'):
            # Need to fetch the banner, either because the series is new or
            # because we never stored banner data for it.
url = self.hostname + '/banners/' + data['banner']
log.debug('refresh series banner %s', url)
status, banner_data = yield from download(url, retry=3)
if status == 200:
series['banner_data'] = banner_data
else:
log.error('banner download failed for series %s', data.get('seriesName', data['id']))
from ..tvdb import Series
status_str = data.get('status', '').lower()
if status_str.startswith('cont'): # continuing
status = Series.STATUS_RUNNING
        elif status_str.startswith('on'): # on hiatus
status = Series.STATUS_SUSPENDED
elif status_str.startswith('end'): # ended
status = Series.STATUS_ENDED
else:
status = Series.STATUS_UNKNOWN
series.update({
'id': str(data['id']),
'name': data.get('seriesName'),
'poster': self.hostname + '/banners/' + data['poster'] if data.get('poster') else None,
'banner': self.hostname + '/banners/' + data['banner'] if data.get('banner') else None,
'overview': data.get('overview'),
'genres': [g.strip().lower() for g in data.get('genres', []) if g],
# TODO: do a sanity check on FirstAired format.
'started': data.get('firstAired'),
'status': status,
'imdbid': data.get('imdbId')
})
# Iterate over all pages of episodes.
for page in itertools.count(1):
status, response = yield from self._api('/series/{}/episodes?page={}'.format(id, page))
if status != 200:
break
elif 'data' not in response:
log.warning('data element missing from episodes response')
break
for episode in response['data']:
try:
series['episodes'].append({
'id': str(episode['id']),
'name': episode.get('episodeName'),
'season': int(episode['airedSeason']),
'episode': int(episode['airedEpisodeNumber']),
# TODO: do a sanity check on FirstAired format.
'airdate': episode.get('firstAired'),
'overview': episode.get('overview')
})
except Exception as e:
log.exception("failed to extract episode details: %s %s", e, episode)
if 'links' not in response or response['links'].get('last', page) == page:
break
return series
@asyncio.coroutine
def get_changed_series_ids(self):
servertime = self.get_last_updated()
if not servertime:
# No servertime stored, so there must not be any series in db.
return
now = int(time.time())
# Grab all series ids currently in the DB.
series = set([o[self.IDATTR] for o in self.db.query(type='series', attrs=[self.IDATTR])])
if now - servertime > 60*60*24*7:
log.warning("haven't updated in over a week, returning all series")
# Haven't updated in over a week (which is the upper bound for the API), so refresh all series.
self.db.set_metadata('thetvdb::servertime', now)
return list(series)
ids = []
status, response = yield from self._api('/updated/query?fromTime={}'.format(servertime))
if status == 200:
if 'data' not in response:
log.warning('data element missing from response')
elif response['data']:
for result in response['data']:
ids.append(str(result['id']))
self.db.set_metadata('thetvdb::servertime', now)
log.debug('set servertime %s', now)
return ids
def get_last_updated(self):
return int(self.db.get_metadata('thetvdb::servertime', 0))
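# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the token caching and
# relogin pattern used by Provider._api(), shown standalone with a fake login
# so it runs without network access. It uses modern async/await and
# asyncio.run() (Python 3.7+) rather than the @asyncio.coroutine style above.
if __name__ == '__main__':
    class _FakeApi:
        TOKEN_LIFETIME_SECONDS = 2  # stand-in for the trimmed 23-hour lifetime

        def __init__(self):
            self._token = None
            self._token_time = 0

        async def _login(self):
            # Stand-in for the real /login POST.
            return 'token-{}'.format(int(time.time() * 1000))

        async def api(self, path):
            now = time.time()
            if not self._token or now - self._token_time > self.TOKEN_LIFETIME_SECONDS:
                # Token missing or past its lifetime: log in again.
                self._token = await self._login()
                self._token_time = now
            return self._token

    async def _demo():
        api = _FakeApi()
        first = await api.api('/series/1')
        second = await api.api('/series/2')  # served from the cached token
        assert first == second
        print('cached token reused:', first)

    asyncio.run(_demo())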
|
|
"""Content that is specific to Annotation IODs."""
from copy import deepcopy
from typing import cast, List, Optional, Sequence, Tuple, Union
import numpy as np
from pydicom.dataset import Dataset
from pydicom.sr.coding import Code
from highdicom.ann.enum import (
AnnotationCoordinateTypeValues,
AnnotationGroupGenerationTypeValues,
GraphicTypeValues,
)
from highdicom.content import AlgorithmIdentificationSequence
from highdicom.sr.coding import CodedConcept
from highdicom.uid import UID
from highdicom._module_utils import check_required_attributes
class Measurements(Dataset):
"""Dataset describing measurements of annotations."""
def __init__(
self,
name: Union[Code, CodedConcept],
values: np.ndarray,
unit: Union[Code, CodedConcept]
) -> None:
"""
Parameters
----------
name: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
Concept name
values: numpy.ndarray
One-dimensional array of floating-point values. Some values may be
NaN (``numpy.nan``) if no measurement is available for a given
annotation. Values must be sorted such that the *n*-th value
represents the measurement for the *n*-th annotation.
        unit: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
Coded units of measurement (see :dcm:`CID 7181 <part16/sect_CID_7181.html>`
"Abstract Multi-dimensional Image Model Component Units")
""" # noqa: E501
super().__init__()
if isinstance(name, Code):
name = CodedConcept(*name)
self.ConceptNameCodeSequence = [name]
if isinstance(unit, Code):
unit = CodedConcept(*unit)
self.MeasurementUnitsCodeSequence = [unit]
is_nan = np.isnan(values)
stored_values = np.array(values[~is_nan], np.float32)
item = Dataset()
item.FloatingPointValues = stored_values.tobytes()
if np.any(is_nan):
stored_indices = (np.where(~is_nan)[0] + 1).astype(np.int32)
item.AnnotationIndexList = stored_indices.tobytes()
self.MeasurementValuesSequence = [item]
@property
def name(self) -> CodedConcept:
"""highdicom.sr.CodedConcept: coded name"""
return self.ConceptNameCodeSequence[0]
@property
def unit(self) -> CodedConcept:
"""highdicom.sr.coding.CodedConcept: coded unit"""
return self.MeasurementUnitsCodeSequence[0]
def get_values(self, number_of_annotations: int) -> np.ndarray:
"""Get measured values for annotations.
Parameters
----------
number_of_annotations: int
Number of annotations in the annotation group
Returns
-------
numpy.ndarray
One-dimensional array of floating-point numbers of length
`number_of_annotations`. The array may be sparse and annotations
for which no measurements are available have value ``numpy.nan``.
Raises
------
IndexError
In case the measured values cannot be indexed given the indices
stored in the Annotation Index List.
"""
item = self.MeasurementValuesSequence[0]
values = np.zeros((number_of_annotations, ), np.float32)
values[:] = np.float32(np.nan)
stored_values = np.frombuffer(item.FloatingPointValues, np.float32)
if hasattr(item, 'AnnotationIndexList'):
stored_indices = np.frombuffer(item.AnnotationIndexList, np.int32)
# Convert from DICOM one-based to Python zero-based indexing
stored_indices = stored_indices - 1
else:
stored_indices = np.arange(number_of_annotations)
try:
values[stored_indices] = stored_values
except IndexError as error:
raise IndexError(
'Could not get values of measurements because of incorrect '
f'annotation indices: {error}. This may either be due to '
'incorrect encoding of the measurements or due to incorrectly '
'specified "number_of_annotations".'
)
return values
@classmethod
def from_dataset(cls, dataset: Dataset) -> 'Measurements':
"""Construct instance from an existing dataset.
Parameters
----------
dataset: pydicom.dataset.Dataset
Dataset representing an item of the Measurements Sequence.
Returns
-------
highdicom.ann.Measurements
Item of the Measurements Sequence
"""
if not isinstance(dataset, Dataset):
raise TypeError(
'Dataset must be of type pydicom.dataset.Dataset.'
)
check_required_attributes(
dataset,
module='microscopy-bulk-simple-annotations',
base_path=['AnnotationGroupSequence', 'MeasurementsSequence'],
)
measurements = deepcopy(dataset)
measurements.__class__ = cls
measurements.ConceptNameCodeSequence = [
CodedConcept.from_dataset(
measurements.ConceptNameCodeSequence[0]
)
]
measurements.MeasurementUnitsCodeSequence = [
CodedConcept.from_dataset(
measurements.MeasurementUnitsCodeSequence[0]
)
]
return cast(Measurements, measurements)
class AnnotationGroup(Dataset):
"""Dataset describing a group of annotations."""
def __init__(
self,
number: int,
uid: str,
label: str,
annotated_property_category: Union[Code, CodedConcept],
annotated_property_type: Union[Code, CodedConcept],
graphic_type: Union[str, GraphicTypeValues],
graphic_data: Sequence[np.ndarray],
algorithm_type: Union[str, AnnotationGroupGenerationTypeValues],
algorithm_identification: Optional[
AlgorithmIdentificationSequence
] = None,
measurements: Optional[Sequence[Measurements]] = None,
description: Optional[str] = None,
anatomic_regions: Optional[
Sequence[Union[Code, CodedConcept]]
] = None,
primary_anatomic_structures: Optional[
Sequence[Union[Code, CodedConcept]]
] = None
):
"""
Parameters
----------
number: int
One-based number for identification of the annotation group
uid: str
Unique identifier of the annotation group
label: str
User-defined label for identification of the annotation group
annotated_property_category: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept]
Category of the property the annotated regions of interest
represents, e.g.,
``Code("49755003", "SCT", "Morphologically Abnormal Structure")``
(see :dcm:`CID 7150 <part16/sect_CID_7150.html>`
"Segmentation Property Categories")
annotated_property_type: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept]
Property the annotated regions of interest represents, e.g.,
``Code("108369006", "SCT", "Neoplasm")``
(see :dcm:`CID 8135 <part16/sect_CID_8135.html>`
"Microscopy Annotation Property Types")
graphic_type: Union[str, highdicom.ann.GraphicTypeValues]
Graphic type of annotated regions of interest
graphic_data: Sequence[numpy.ndarray]
Array of ordered spatial coordinates, where each row of an array
represents a (Column,Row) coordinate pair or (X,Y,Z) coordinate
triplet.
algorithm_type: Union[str, highdicom.ann.AnnotationGroupGenerationTypeValues]
Type of algorithm that was used to generate the annotation
algorithm_identification: Union[highdicom.AlgorithmIdentificationSequence, None], optional
Information useful for identification of the algorithm, such
as its name or version. Required unless the `algorithm_type` is
``"MANUAL"``
measurements: Union[Sequence[highdicom.ann.Measurements], None], optional
One or more sets of measurements for annotated regions of
interest
description: Union[str, None], optional
Description of the annotation group
anatomic_regions: Union[Sequence[Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept]], None], optional
Anatomic region(s) into which annotations fall
        primary_anatomic_structures: Union[Sequence[Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept]], None], optional
Anatomic structure(s) the annotations represent
(see CIDs for domain-specific primary anatomic structures)
""" # noqa: E501
super().__init__()
if not isinstance(number, int):
raise TypeError('Argument "number" must be an integer.')
if number < 1:
raise ValueError('Argument "number" must be a positive integer.')
self.AnnotationGroupNumber = number
self.AnnotationGroupUID = str(uid)
self.AnnotationGroupLabel = str(label)
if description is not None:
self.AnnotationGroupDescription = description
algorithm_type = AnnotationGroupGenerationTypeValues(algorithm_type)
self.AnnotationGroupGenerationType = algorithm_type.value
if algorithm_type != AnnotationGroupGenerationTypeValues.MANUAL:
if algorithm_identification is None:
raise TypeError(
'Argument "algorithm_identification" must be provided if '
f'argument "algorithm_type" is "{algorithm_type.value}".'
)
if not isinstance(algorithm_identification,
AlgorithmIdentificationSequence):
raise TypeError(
'Argument "algorithm_identification" must have type '
'AlgorithmIdentificationSequence.'
)
self.AnnotationGroupAlgorithmIdentificationSequence = \
algorithm_identification
if isinstance(annotated_property_category, Code):
self.AnnotationPropertyCategoryCodeSequence = [
CodedConcept(*annotated_property_category)
]
else:
self.AnnotationPropertyCategoryCodeSequence = [
annotated_property_category,
]
if isinstance(annotated_property_type, Code):
self.AnnotationPropertyTypeCodeSequence = [
CodedConcept(*annotated_property_type),
]
else:
self.AnnotationPropertyTypeCodeSequence = [
annotated_property_type,
]
self.NumberOfAnnotations = len(graphic_data)
graphic_type = GraphicTypeValues(graphic_type)
self.GraphicType = graphic_type.value
for i in range(len(graphic_data)):
num_coords = graphic_data[i].shape[0]
if graphic_type == GraphicTypeValues.POINT:
if num_coords != 1:
raise ValueError(
f'Graphic data of annotation #{i + 1} of graphic type '
'"POINT" must be a single coordinate.'
)
elif graphic_type == GraphicTypeValues.RECTANGLE:
if num_coords != 4:
raise ValueError(
f'Graphic data of annotation #{i + 1} of graphic type '
'"RECTANGLE" must be four coordinates.'
)
elif graphic_type == GraphicTypeValues.ELLIPSE:
if num_coords != 4:
raise ValueError(
f'Graphic data of annotation #{i + 1} of graphic type '
'"ELLIPSE" must be four coordinates.'
)
elif graphic_type == GraphicTypeValues.POLYLINE:
if num_coords < 2:
raise ValueError(
f'Graphic data of annotation #{i + 1} of graphic type '
'"POLYLINE" must be at least two coordinates.'
)
elif graphic_type == GraphicTypeValues.POLYGON:
if num_coords < 3:
raise ValueError(
f'Graphic data of annotation #{i + 1} of graphic type '
'"POLYGON" must be at least three coordinates.'
)
if np.allclose(graphic_data[i][0], graphic_data[i][-1]):
raise ValueError(
'The first and last coordinate of graphic data of '
f'annotation #{i + 1} of graphic type "POLYGON" '
'must not be identical. '
'Note that the ANN Graphic Type is different in this '
'respect from the corresponding SR Graphic Type.'
)
else:
raise ValueError(
f'Graphic data of annotation #{i + 1} has an unknown '
'graphic type.'
)
try:
coordinates = np.concatenate(graphic_data, axis=0)
except ValueError:
raise ValueError(
'Items of argument "graphic_data" must be arrays with the '
'same dimensions.'
)
if coordinates.dtype.kind in ('u', 'i'):
coordinates = coordinates.astype(np.float32)
if coordinates.ndim != 2:
raise ValueError(
'Items of argument "graphic_data" must be two-dimensional '
'arrays.'
)
if coordinates.shape[1] not in (2, 3):
raise ValueError(
'Items of argument "graphic_data" must be two-dimensional '
'arrays where the second array dimension has size 2 or 3.'
)
coordinate_type = AnnotationCoordinateTypeValues.SCOORD
if coordinates.shape[1] == 3:
coordinate_type = AnnotationCoordinateTypeValues.SCOORD3D
if not np.all(np.isfinite(coordinates)):
raise ValueError(
'Items of argument "graphic_data" must be arrays of finite '
'floating-point numbers. Some values are not finite, '
'i.e., are either NaN, +inf, or -inf.'
)
if coordinates.shape[1] == 3:
unique_z_values = np.unique(coordinates[:, 2])
if len(unique_z_values) == 1:
self.CommonZCoordinateValue = unique_z_values[0]
coordinates_data = coordinates[:, 0:2].flatten()
dimensionality = 2
else:
coordinates_data = coordinates.flatten()
dimensionality = 3
else:
coordinates_data = coordinates.flatten()
dimensionality = 2
if coordinates.dtype == np.double:
self.DoublePointCoordinatesData = coordinates_data.tobytes()
else:
self.PointCoordinatesData = coordinates_data.tobytes()
self._graphic_data = {coordinate_type: graphic_data}
if graphic_type in (
GraphicTypeValues.POLYGON,
GraphicTypeValues.POLYLINE,
):
spans = [item.shape[0] * dimensionality for item in graphic_data]
point_indices = np.cumsum(spans, dtype=np.int32) + 1
point_indices = np.concatenate([
np.array([1], dtype=np.int32),
point_indices[:-1]
])
self.LongPrimitivePointIndexList = point_indices.tobytes()
self.AnnotationAppliesToAllZPlanes = 'NO'
self.AnnotationAppliesToAllOpticalPaths = 'YES'
if measurements is not None:
self.MeasurementsSequence = []
for i, item in enumerate(measurements):
if not isinstance(item, Measurements):
raise TypeError(
f'Item #{i} of argument "measurements" must have '
'type Measurements.'
)
error_message = (
f'The number of values of item #{i} of argument '
'"measurements" must match the number of annotations.'
)
try:
measured_values = item.get_values(self.NumberOfAnnotations)
except IndexError:
raise ValueError(error_message)
if len(measured_values) != self.NumberOfAnnotations:
# This should not occur, but safety first.
raise ValueError(error_message)
self.MeasurementsSequence.append(item)
if anatomic_regions is not None:
self.AnatomicRegionSequence = [
CodedConcept(
region.value,
region.scheme_designator,
region.meaning,
region.scheme_version
)
for region in anatomic_regions
]
if primary_anatomic_structures is not None:
self.PrimaryAnatomicStructureSequence = [
CodedConcept(
structure.value,
structure.scheme_designator,
structure.meaning,
structure.scheme_version
)
for structure in primary_anatomic_structures
]
@property
def label(self) -> str:
"""str: label"""
return str(self.AnnotationGroupLabel)
@property
def number(self) -> int:
"""int: one-based identification number"""
return int(self.AnnotationGroupNumber)
@property
def uid(self) -> UID:
"""highdicom.UID: unique identifier"""
return UID(self.AnnotationGroupUID)
@property
def graphic_type(self) -> GraphicTypeValues:
"""highdicom.ann.GraphicTypeValues: graphic type"""
return GraphicTypeValues(self.GraphicType)
@property
def annotated_property_category(self) -> CodedConcept:
"""highdicom.sr.CodedConcept: coded annotated property category"""
return self.AnnotationPropertyCategoryCodeSequence[0]
@property
def annotated_property_type(self) -> CodedConcept:
"""highdicom.sr.CodedConcept: coded annotated property type"""
return self.AnnotationPropertyTypeCodeSequence[0]
@property
def algorithm_type(self) -> AnnotationGroupGenerationTypeValues:
"""highdicom.ann.AnnotationGroupGenerationTypeValues: algorithm type"""
return AnnotationGroupGenerationTypeValues(
self.AnnotationGroupGenerationType
)
@property
def algorithm_identification(
self
) -> Union[AlgorithmIdentificationSequence, None]:
"""Union[highdicom.AlgorithmIdentificationSequence, None]:
Information useful for identification of the algorithm, if any.
"""
if hasattr(self, 'AnnotationGroupAlgorithmIdentificationSequence'):
return self.AnnotationGroupAlgorithmIdentificationSequence
return None
@property
def anatomic_regions(self) -> List[CodedConcept]:
"""List[highdicom.sr.CodedConcept]:
List of anatomic regions into which the annotations fall.
May be empty.
"""
if not hasattr(self, 'AnatomicRegionSequence'):
return []
return list(self.AnatomicRegionSequence)
@property
def primary_anatomic_structures(self) -> List[CodedConcept]:
"""List[highdicom.sr.CodedConcept]:
        List of anatomic structures the annotations represent.
May be empty.
"""
if not hasattr(self, 'PrimaryAnatomicStructureSequence'):
return []
return list(self.PrimaryAnatomicStructureSequence)
def get_graphic_data(
self,
coordinate_type: Union[str, AnnotationCoordinateTypeValues]
) -> List[np.ndarray]:
"""Get spatial coordinates of all graphical annotations.
Parameters
----------
coordinate_type: Union[str, highdicom.ann.AnnotationCoordinateTypeValues]
Coordinate type of annotation
Returns
-------
List[numpy.ndarray]
Two-dimensional array of floating-point values representing either
2D or 3D spatial coordinates for each graphical annotation
""" # noqa: E501
coordinate_type = AnnotationCoordinateTypeValues(coordinate_type)
if self._graphic_data:
if coordinate_type not in self._graphic_data:
raise ValueError(
'Graphic data is not available for Annotation Coordinate '
f'Type "{coordinate_type.value}".'
)
else:
if coordinate_type == AnnotationCoordinateTypeValues.SCOORD:
coordinate_dimensionality = 2
else:
coordinate_dimensionality = 3
try:
coordinates_data = getattr(self, 'DoublePointCoordinatesData')
coordinates_dtype = np.float64
except AttributeError:
coordinates_data = getattr(self, 'PointCoordinatesData')
coordinates_dtype = np.float32
decoded_coordinates_data = np.frombuffer(
coordinates_data,
coordinates_dtype
)
if hasattr(self, 'CommonZCoordinateValue'):
stored_coordinate_dimensionality = 2
else:
stored_coordinate_dimensionality = coordinate_dimensionality
# Reshape array to stack of points
decoded_coordinates_data = decoded_coordinates_data.reshape(
-1,
stored_coordinate_dimensionality
)
if hasattr(self, 'CommonZCoordinateValue'):
# Add in a column for the shared z coordinate
z_values = np.full(
shape=(decoded_coordinates_data.shape[0], 1),
fill_value=self.CommonZCoordinateValue,
dtype=coordinates_dtype
)
decoded_coordinates_data = np.concatenate(
[decoded_coordinates_data, z_values],
axis=1
)
# Split into objects down the first dimension
graphic_type = self.graphic_type
if graphic_type in (
GraphicTypeValues.RECTANGLE,
GraphicTypeValues.ELLIPSE,
):
# Fixed 4 coordinates per object
split_param: Union[
int,
Sequence[int]
] = len(decoded_coordinates_data) // 4
elif graphic_type == GraphicTypeValues.POINT:
# Fixed 1 coordinate per object
split_param = len(decoded_coordinates_data)
elif graphic_type in (
GraphicTypeValues.POLYLINE,
GraphicTypeValues.POLYGON,
):
# Variable number of coordinates per point
point_indices = np.frombuffer(
self.LongPrimitivePointIndexList,
dtype=np.int32
) - 1
split_param = (
point_indices // stored_coordinate_dimensionality
)[1:]
else:
raise ValueError(
'Encountered unexpected graphic type '
f'"{graphic_type.value}".'
)
graphic_data = np.split(
decoded_coordinates_data,
indices_or_sections=split_param
)
self._graphic_data[coordinate_type] = graphic_data
return self._graphic_data[coordinate_type]
def get_coordinates(
self,
annotation_number: int,
coordinate_type: Union[str, AnnotationCoordinateTypeValues]
) -> np.ndarray:
"""Get spatial coordinates of a graphical annotation.
Parameters
----------
annotation_number: int
One-based identification number of the annotation
coordinate_type: Union[str, highdicom.ann.AnnotationCoordinateTypeValues]
Coordinate type of annotation
Returns
-------
numpy.ndarray
Two-dimensional array of floating-point values representing either
2D or 3D spatial coordinates of a graphical annotation
""" # noqa: E501
graphic_data = self.get_graphic_data(coordinate_type)
annotation_index = annotation_number - 1
return graphic_data[annotation_index]
@property
def number_of_annotations(self) -> int:
"""int: Number of annotations in group"""
return int(self.NumberOfAnnotations)
def get_measurements(
self,
name: Optional[Union[Code, CodedConcept]] = None
) -> Tuple[
List[CodedConcept], np.ndarray, List[CodedConcept]
]:
"""Get measurements.
Parameters
----------
name: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept, None], optional
Name by which measurements should be filtered
Returns
-------
names: List[highdicom.sr.CodedConcept]
Names of measurements
values: numpy.ndarray
Two-dimensional array of measurement floating point values. The
            array has shape n x m, where *n* is the number of annotations
and *m* is the number of measurements. The array may contain
``numpy.nan`` values in case a measurement is not available for a
given annotation.
units: List[highdicom.sr.CodedConcept]
Units of measurements
""" # noqa: E501
number_of_annotations = self.number_of_annotations
if hasattr(self, 'MeasurementsSequence'):
values = [
item.get_values(number_of_annotations)
for item in self.MeasurementsSequence
if name is None or item.name == name
]
if len(values) > 0:
value_array = np.vstack(values).T
else:
value_array = np.empty((number_of_annotations, 0), np.float32)
names = [
item.name for item in self.MeasurementsSequence
if name is None or item.name == name
]
units = [
item.unit for item in self.MeasurementsSequence
if name is None or item.name == name
]
else:
value_array = np.empty((number_of_annotations, 0), np.float32)
names = []
units = []
return (names, value_array, units)
def _get_coordinate_index(
self,
annotation_number: int,
coordinate_dimensionality: int,
number_of_coordinates: int
) -> np.ndarray:
"""Get coordinate index.
Parameters
----------
annotation_number: int
One-based identification number of the annotation
coordinate_dimensionality: int
Dimensionality of coordinate points
number_of_coordinates: int
Total number of coordinate points
Returns
-------
numpy.ndarray
One-dimensional array of zero-based index values to obtain the
coordinate points for a given annotation
""" # noqa: E501
annotation_index = annotation_number - 1
graphic_type = self.graphic_type
if graphic_type in (
GraphicTypeValues.POLYGON,
GraphicTypeValues.POLYLINE,
):
point_indices = np.frombuffer(
self.LongPrimitivePointIndexList,
dtype=np.int32
)
start = point_indices[annotation_index] - 1
try:
end = point_indices[annotation_index + 1] - 1
except IndexError:
end = number_of_coordinates
else:
if hasattr(self, 'CommonZCoordinateValue'):
stored_coordinate_dimensionality = 2
else:
stored_coordinate_dimensionality = coordinate_dimensionality
if graphic_type in (
GraphicTypeValues.ELLIPSE,
GraphicTypeValues.RECTANGLE,
):
length = 4 * stored_coordinate_dimensionality
elif graphic_type == GraphicTypeValues.POINT:
length = stored_coordinate_dimensionality
else:
raise ValueError(
'Encountered unexpected graphic type '
f'"{graphic_type.value}".'
)
start = annotation_index * length
end = start + length
coordinate_index = np.arange(start, end)
return coordinate_index
@classmethod
def from_dataset(cls, dataset: Dataset) -> 'AnnotationGroup':
"""Construct instance from an existing dataset.
Parameters
----------
dataset: pydicom.dataset.Dataset
Dataset representing an item of the Annotation Group Sequence.
Returns
-------
highdicom.ann.AnnotationGroup
Item of the Annotation Group Sequence
"""
if not isinstance(dataset, Dataset):
raise TypeError(
'Dataset must be of type pydicom.dataset.Dataset.'
)
check_required_attributes(
dataset,
module='microscopy-bulk-simple-annotations',
base_path=['AnnotationGroupSequence'],
)
group = deepcopy(dataset)
group.__class__ = cls
group._graphic_data = {}
group.AnnotationPropertyCategoryCodeSequence = [
CodedConcept.from_dataset(
group.AnnotationPropertyCategoryCodeSequence[0]
)
]
group.AnnotationPropertyTypeCodeSequence = [
CodedConcept.from_dataset(
group.AnnotationPropertyTypeCodeSequence[0]
)
]
if hasattr(group, 'AnnotationGroupAlgorithmIdentificationSequence'):
group.AnnotationGroupAlgorithmIdentificationSequence = \
AlgorithmIdentificationSequence.from_sequence(
group.AnnotationGroupAlgorithmIdentificationSequence
)
if hasattr(group, 'MeasurementsSequence'):
group.MeasurementsSequence = [
Measurements.from_dataset(ds)
for ds in group.MeasurementsSequence
]
if hasattr(group, 'AnatomicRegionSequence'):
group.AnatomicRegionSequence = [
CodedConcept.from_dataset(ds)
for ds in group.AnatomicRegionSequence
]
if hasattr(group, 'PrimaryAnatomicStructureSequence'):
group.PrimaryAnatomicStructureSequence = [
CodedConcept.from_dataset(ds)
for ds in group.PrimaryAnatomicStructureSequence
]
return cast(AnnotationGroup, group)
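# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how variable-length
# POLYLINE/POLYGON graphic data round-trips through the one-based Long
# Primitive Point Index List, and how sparse measurements round-trip through
# the one-based Annotation Index List. It mirrors the logic above using plain
# numpy only; the example arrays are hypothetical.
if __name__ == '__main__':
    polylines = [
        np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]),  # 3 points
        np.array([[5.0, 5.0], [6.0, 5.0]]),              # 2 points
    ]
    dimensionality = 2
    # Encoding (as in AnnotationGroup.__init__): one-based index of the first
    # coordinate *value* of each annotation within the flattened data.
    spans = [item.shape[0] * dimensionality for item in polylines]
    point_indices = np.cumsum(spans, dtype=np.int32) + 1
    point_indices = np.concatenate(
        [np.array([1], dtype=np.int32), point_indices[:-1]]
    )
    assert point_indices.tolist() == [1, 7]
    # Decoding (as in get_graphic_data): convert value offsets back to point
    # rows and split the flattened coordinate array per annotation.
    flat = np.concatenate(polylines, axis=0)
    split_at = ((point_indices - 1) // dimensionality)[1:]
    decoded = np.split(flat, split_at)
    assert all(np.array_equal(a, b) for a, b in zip(decoded, polylines))
    # Sparse measurements (as in Measurements): only non-NaN values are
    # stored, together with their one-based annotation indices.
    values = np.array([1.0, np.nan, 3.0], np.float32)
    is_nan = np.isnan(values)
    stored_values = values[~is_nan]
    stored_indices = (np.where(~is_nan)[0] + 1).astype(np.int32)
    restored = np.full(values.shape, np.nan, np.float32)
    restored[stored_indices - 1] = stored_values
    assert np.array_equal(np.isnan(restored), is_nan)
    print('graphic data and measurement round trips OK')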
|
|
import unittest
from kivy.vector import Vector
from operator import truediv
class VectorTestCase(unittest.TestCase):
def test_initializer_oneparameter_as_list(self):
vector = Vector([1])
self.assertEqual(vector.x, 1)
with self.assertRaises(IndexError):
vector.y
def test_initializer_oneparameter_as_int(self):
with self.assertRaises(TypeError):
Vector(1)
def test_initializer_twoparameters(self):
vector = Vector(1, 2)
self.assertEqual(vector.x, 1)
self.assertEqual(vector.y, 2)
def test_initializer_noparameter(self):
with self.assertRaises(Exception):
Vector()
def test_initializer_threeparameters(self):
with self.assertRaises(Exception):
Vector(1, 2, 3)
def test_sum_twovectors(self):
finalVector = Vector(1, 1) + Vector(1, 1)
self.assertEqual(finalVector.x, 2)
self.assertEqual(finalVector.y, 2)
def test_sum_inplace(self):
finalVector = Vector(1, 1)
finalVector += Vector(1, 1)
self.assertEqual(finalVector.x, 2)
self.assertEqual(finalVector.y, 2)
def test_sum_inplace_scalar(self):
finalVector = Vector(1, 1)
finalVector += 1
self.assertEqual(finalVector.x, 2)
self.assertEqual(finalVector.y, 2)
def test_sum_scalar(self):
with self.assertRaises(TypeError):
Vector(1, 1) + 1
def test_sub_twovectors(self):
finalVector = Vector(3, 3) - Vector(2, 2)
self.assertEqual(finalVector.x, 1)
self.assertEqual(finalVector.y, 1)
def test_sub_inplace(self):
finalVector = Vector(3, 3)
finalVector -= Vector(2, 2)
self.assertEqual(finalVector.x, 1)
self.assertEqual(finalVector.y, 1)
def test_sub_scalar(self):
with self.assertRaises(TypeError):
Vector(3, 3) - 2
def test_sub_inplace_scalar(self):
finalVector = Vector(3, 3)
finalVector -= 2
self.assertEqual(finalVector.x, 1)
self.assertEqual(finalVector.y, 1)
def test_mul_twovectors(self):
finalVector = Vector(2, 2) * Vector(3, 3)
self.assertEqual(finalVector.x, 6)
self.assertEqual(finalVector.y, 6)
def test_mul_inplace(self):
finalVector = Vector(2, 2)
finalVector *= Vector(3, 3)
self.assertEqual(finalVector.x, 6)
self.assertEqual(finalVector.y, 6)
def test_mul_inplace_scalar(self):
finalVector = Vector(2, 2)
finalVector *= 3
self.assertEqual(finalVector.x, 6)
self.assertEqual(finalVector.y, 6)
def test_mul_scalar(self):
finalVector = Vector(2, 2) * 3
self.assertEqual(finalVector.x, 6)
self.assertEqual(finalVector.y, 6)
def test_rmul_list(self):
finalVector = (3, 3) * Vector(2, 2)
self.assertEqual(finalVector.x, 6)
self.assertEqual(finalVector.y, 6)
def test_rmul_scalar(self):
finalVector = 3 * Vector(2, 2)
self.assertEqual(finalVector.x, 6)
self.assertEqual(finalVector.y, 6)
def test_div_twovectors(self):
finalVector = Vector(6, 6) / Vector(2, 2)
self.assertEqual(finalVector.x, 3)
self.assertEqual(finalVector.y, 3)
def test_truediv_twovectors(self):
finalVector = truediv(Vector(6, 6), Vector(2., 2.))
self.assertEqual(finalVector.x, 3.)
self.assertEqual(finalVector.y, 3.)
def test_truediv_scalar(self):
finalVector = truediv(Vector(6, 6), 2.)
self.assertEqual(finalVector.x, 3.)
self.assertEqual(finalVector.y, 3.)
def test_div_inplace(self):
finalVector = Vector(6, 6)
finalVector /= Vector(2, 2)
self.assertEqual(finalVector.x, 3)
self.assertEqual(finalVector.y, 3)
def test_div_inplace_scalar(self):
finalVector = Vector(6, 6)
finalVector /= 2
self.assertEqual(finalVector.x, 3)
self.assertEqual(finalVector.y, 3)
def test_div_scalar(self):
finalVector = Vector(6, 6) / 2
self.assertEqual(finalVector.x, 3)
self.assertEqual(finalVector.y, 3)
#def test_rdiv_list(self):
#finalVector = (6.0, 6.0) / Vector(3.0, 3.0)
#self.assertEqual(finalVector.x, 2)
#self.assertEqual(finalVector.y, 2)
#def test_rdiv_scalar(self):
#finalVector = 6 / Vector(3, 3)
#self.assertEqual(finalVector.x, 2)
#self.assertEqual(finalVector.y, 2)
def test_sum_oversizedlist(self):
Vector(6, 6) + (1, 2)
def test_negation(self):
vector = - Vector(1, 1)
self.assertEqual(vector.x, -1)
self.assertEqual(vector.y, -1)
def test_length(self):
length = Vector(10, 10).length()
self.assertEqual(length, 14.142135623730951)
def test_length_zerozero(self):
length = Vector(0, 0).length()
self.assertEqual(length, 0)
def test_length2(self):
length = Vector(10, 10).length2()
self.assertEqual(length, 200)
def test_distance(self):
distance = Vector(10, 10).distance((5, 10))
self.assertEqual(distance, 5)
def test_distance2(self):
distance = Vector(10, 10).distance2((5, 10))
self.assertEqual(distance, 25)
def test_normalize(self):
vector = Vector(88, 33).normalize()
self.assertEqual(vector.x, 0.93632917756904444)
self.assertEqual(vector.y, 0.3511234415883917)
self.assertAlmostEqual(vector.length(), 1.0)
def test_normalize_zerovector(self):
vector = Vector(0, 0).normalize()
self.assertEqual(vector.x, 0)
self.assertEqual(vector.y, 0)
self.assertEqual(vector.length(), 0)
def test_dot(self):
result = Vector(2, 4).dot((2, 2))
self.assertEqual(result, 12)
def test_angle(self):
result = Vector(100, 0).angle((0, 100))
self.assertEqual(result, -90.0)
def test_rotate(self):
v = Vector(100, 0)
v = v.rotate(45)
self.assertEqual(v.x, 70.710678118654755)
self.assertEqual(v.y, 70.710678118654741)
    def test_line_intersection(self):
a = (98, 28)
b = (72, 33)
c = (10, -5)
d = (20, 88)
result = Vector.line_intersection(a, b, c, d)
self.assertEqual(result.x, 15.25931928687196)
self.assertEqual(result.y, 43.911669367909241)
def test_inbbox(self):
bmin = (0, 0)
bmax = (100, 100)
result = Vector.in_bbox((50, 50), bmin, bmax)
self.assertTrue(result)
result = Vector.in_bbox((647, -10), bmin, bmax)
self.assertFalse(result)
|
|
# Copyright (c) 2013 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.compute.plugins.v3 import baremetal_nodes
from nova import context
from nova import exception
from nova import test
from nova.virt.baremetal import db
class FakeRequest(object):
def __init__(self, context):
self.environ = {"nova.context": context}
class BareMetalNodesTest(test.TestCase):
def setUp(self):
super(BareMetalNodesTest, self).setUp()
self.context = context.get_admin_context()
self.controller = baremetal_nodes.BareMetalNodeController()
self.request = FakeRequest(self.context)
def test_create(self):
node = {
'service_host': "host",
'cpus': 8,
'memory_mb': 8192,
'local_gb': 128,
'pm_address': "10.1.2.3",
'pm_user': "pm_user",
'pm_password': "pm_pass",
'terminal_port': 8000,
'interfaces': [],
}
response = node.copy()
response['id'] = 100
del response['pm_password']
response['instance_uuid'] = None
self.mox.StubOutWithMock(db, 'bm_node_create')
db.bm_node_create(self.context, node).AndReturn(response)
self.mox.ReplayAll()
res_dict = self.controller.create(self.request, {'node': node})
self.assertEqual({'node': response}, res_dict)
def test_delete(self):
self.mox.StubOutWithMock(db, 'bm_node_destroy')
db.bm_node_destroy(self.context, 1)
self.mox.ReplayAll()
self.controller.delete(self.request, 1)
def test_delete_node_not_found(self):
self.mox.StubOutWithMock(db, 'bm_node_destroy')
db.bm_node_destroy(self.context, 1).\
AndRaise(exception.NodeNotFound(node_id=1))
self.mox.ReplayAll()
self.assertRaises(
exc.HTTPNotFound,
self.controller.delete,
self.request,
1)
def test_index(self):
nodes = [{'id': 1},
{'id': 2},
]
interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'},
{'id': 2, 'address': '22:22:22:22:22:22'},
]
self.mox.StubOutWithMock(db, 'bm_node_get_all')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
db.bm_node_get_all(self.context).AndReturn(nodes)
db.bm_interface_get_all_by_bm_node_id(self.context, 1).\
AndRaise(exception.NodeNotFound(node_id=1))
db.bm_interface_get_all_by_bm_node_id(self.context, 2).\
AndReturn(interfaces)
self.mox.ReplayAll()
res_dict = self.controller.index(self.request)
self.assertEqual(2, len(res_dict['nodes']))
self.assertEqual([], res_dict['nodes'][0]['interfaces'])
self.assertEqual(2, len(res_dict['nodes'][1]['interfaces']))
def test_show(self):
node_id = 1
node = {'id': node_id}
interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'},
{'id': 2, 'address': '22:22:22:22:22:22'},
]
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
db.bm_node_get(self.context, node_id).AndReturn(node)
db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
AndReturn(interfaces)
self.mox.ReplayAll()
res_dict = self.controller.show(self.request, node_id)
self.assertEqual(node_id, res_dict['node']['id'])
self.assertEqual(2, len(res_dict['node']['interfaces']))
def test_show_no_interfaces(self):
node_id = 1
node = {'id': node_id}
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
db.bm_node_get(self.context, node_id).AndReturn(node)
db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
AndRaise(exception.NodeNotFound(node_id=node_id))
self.mox.ReplayAll()
res_dict = self.controller.show(self.request, node_id)
self.assertEqual(node_id, res_dict['node']['id'])
self.assertEqual(0, len(res_dict['node']['interfaces']))
def test_add_interface(self):
node_id = 1
address = '11:22:33:44:55:66'
body = {'add_interface': {'address': address}}
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_create')
self.mox.StubOutWithMock(db, 'bm_interface_get')
db.bm_node_get(self.context, node_id)
db.bm_interface_create(self.context,
bm_node_id=node_id,
address=address,
datapath_id=None,
port_no=None).\
AndReturn(12345)
db.bm_interface_get(self.context, 12345).\
AndReturn({'id': 12345, 'address': address})
self.mox.ReplayAll()
res_dict = self.controller._add_interface(self.request, node_id, body)
self.assertEqual(12345, res_dict['interface']['id'])
self.assertEqual(address, res_dict['interface']['address'])
def test_remove_interface(self):
node_id = 1
interfaces = [{'id': 1},
{'id': 2},
{'id': 3},
]
body = {'remove_interface': {'id': 2}}
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
self.mox.StubOutWithMock(db, 'bm_interface_destroy')
db.bm_node_get(self.context, node_id)
db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
AndReturn(interfaces)
db.bm_interface_destroy(self.context, 2)
self.mox.ReplayAll()
self.controller._remove_interface(self.request, node_id, body)
def test_remove_interface_by_address(self):
node_id = 1
interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'},
{'id': 2, 'address': '22:22:22:22:22:22'},
{'id': 3, 'address': '33:33:33:33:33:33'},
]
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
self.mox.StubOutWithMock(db, 'bm_interface_destroy')
db.bm_node_get(self.context, node_id)
db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
AndReturn(interfaces)
db.bm_interface_destroy(self.context, 2)
self.mox.ReplayAll()
body = {'remove_interface': {'address': '22:22:22:22:22:22'}}
self.controller._remove_interface(self.request, node_id, body)
def test_remove_interface_no_id_no_address(self):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id)
self.mox.ReplayAll()
body = {'remove_interface': {}}
self.assertRaises(exc.HTTPBadRequest,
self.controller._remove_interface,
self.request,
node_id,
body)
def test_add_interface_node_not_found(self):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id).\
AndRaise(exception.NodeNotFound(node_id=node_id))
self.mox.ReplayAll()
body = {'add_interface': {'address': '11:11:11:11:11:11'}}
self.assertRaises(exc.HTTPNotFound,
self.controller._add_interface,
self.request,
node_id,
body)
def test_remove_interface_node_not_found(self):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id).\
AndRaise(exception.NodeNotFound(node_id=node_id))
self.mox.ReplayAll()
body = {'remove_interface': {'address': '11:11:11:11:11:11'}}
self.assertRaises(exc.HTTPNotFound,
self.controller._remove_interface,
self.request,
node_id,
body)
|
|
'''Vector
======
The :class:`Vector` represents a 2D vector (x, y).
Our implementation is built on top of a Python list.
Example of constructing a Vector::
>>> # Construct a point at 82,34
>>> v = Vector(82, 34)
>>> v[0]
82
>>> v.x
82
>>> v[1]
34
>>> v.y
34
>>> # Construct by giving a list of 2 values
>>> pos = (93, 45)
>>> v = Vector(pos)
>>> v[0]
93
>>> v.x
93
>>> v[1]
45
>>> v.y
45
Optimized usage
---------------
Most of the time, you can use a list for arguments instead of using a
Vector. For example, if you want to compute the distance between 2
points::
a = (10, 10)
b = (87, 34)
# optimized method
print('distance between a and b:', Vector(a).distance(b))
# non-optimized method
va = Vector(a)
vb = Vector(b)
print('distance between a and b:', va.distance(vb))
Vector operators
----------------
The :class:`Vector` supports some numeric operators such as +, -, /::
>>> Vector(1, 1) + Vector(9, 5)
[10, 6]
>>> Vector(9, 5) - Vector(5, 5)
[4, 0]
>>> Vector(10, 10) / Vector(2., 4.)
[5.0, 2.5]
>>> Vector(10, 10) / 5.
[2.0, 2.0]
You can also do in-place operations::
>>> v = Vector(1, 1)
>>> v += 2
>>> v
[3, 3]
>>> v *= 5
>>> v
[15, 15]
>>> v /= 2.
>>> v
[7.5, 7.5]
'''
__all__ = ('Vector', )
import math
class Vector(list):
'''Vector class. See module documentation for more information.
'''
def __init__(self, *largs):
if len(largs) == 1:
super(Vector, self).__init__(largs[0])
elif len(largs) == 2:
super(Vector, self).__init__(largs)
else:
raise Exception('Invalid vector')
def _get_x(self):
return self[0]
def _set_x(self, x):
self[0] = x
x = property(_get_x, _set_x)
    ''':data:`x` represents the first element in the list.
>>> v = Vector(12, 23)
>>> v[0]
12
>>> v.x
12
'''
def _get_y(self):
return self[1]
def _set_y(self, y):
self[1] = y
y = property(_get_y, _set_y)
    ''':data:`y` represents the second element in the list.
>>> v = Vector(12, 23)
>>> v[1]
23
>>> v.y
23
'''
def __getslice__(self, i, j):
try:
# use the list __getslice__ method and convert
# result to vector
return Vector(super(Vector, self).__getslice__(i, j))
except Exception:
raise TypeError('vector::FAILURE in __getslice__')
def __add__(self, val):
return Vector(list(map(lambda x, y: x + y, self, val)))
def __iadd__(self, val):
if type(val) in (int, float):
self.x += val
self.y += val
else:
self.x += val.x
self.y += val.y
return self
def __neg__(self):
return Vector([-x for x in self])
def __sub__(self, val):
return Vector(list(map(lambda x, y: x - y, self, val)))
def __isub__(self, val):
if type(val) in (int, float):
self.x -= val
self.y -= val
else:
self.x -= val.x
self.y -= val.y
return self
def __mul__(self, val):
try:
return Vector(list(map(lambda x, y: x * y, self, val)))
except Exception:
return Vector([x * val for x in self])
def __imul__(self, val):
if type(val) in (int, float):
self.x *= val
self.y *= val
else:
self.x *= val.x
self.y *= val.y
return self
def __rmul__(self, val):
return (self * val)
def __truediv__(self, val):
try:
return Vector(list(map(lambda x, y: x / y, self, val)))
except Exception:
return Vector([x / val for x in self])
def __div__(self, val):
try:
return Vector(list(map(lambda x, y: x / y, self, val)))
except Exception:
return Vector([x / val for x in self])
def __rdiv__(self, val):
try:
return Vector(list(map(lambda x, y: x / y, self, val)))
except Exception:
            return Vector([val / x for x in self])
def __idiv__(self, val):
if type(val) in (int, float):
self.x /= val
self.y /= val
else:
self.x /= val.x
self.y /= val.y
return self
def length(self):
'''Returns the length of a vector.
>>> Vector(10, 10).length()
14.142135623730951
>>> pos = (10, 10)
>>> Vector(pos).length()
14.142135623730951
'''
return math.sqrt(self[0] ** 2 + self[1] ** 2)
def length2(self):
'''Returns the length of a vector squared.
>>> Vector(10, 10).length2()
200
>>> pos = (10, 10)
>>> Vector(pos).length2()
200
'''
return self[0] ** 2 + self[1] ** 2
def distance(self, to):
'''Returns the distance between two points.
>>> Vector(10, 10).distance((5, 10))
        5.0
>>> a = (90, 33)
>>> b = (76, 34)
>>> Vector(a).distance(b)
14.035668847618199
'''
return math.sqrt((self[0] - to[0]) ** 2 + (self[1] - to[1]) ** 2)
def distance2(self, to):
'''Returns the distance between two points squared.
>>> Vector(10, 10).distance2((5, 10))
25
'''
return (self[0] - to[0]) ** 2 + (self[1] - to[1]) ** 2
def normalize(self):
        '''Returns a new vector that has the same direction as this vector,
but has a length of one.
>>> v = Vector(88, 33).normalize()
>>> v
[0.93632917756904444, 0.3511234415883917]
>>> v.length()
1.0
'''
if self[0] == 0. and self[1] == 0.:
return Vector(0., 0.)
return self / self.length()
def dot(self, a):
'''Computes the dot product of a and b.
>>> Vector(2, 4).dot((2, 2))
12
'''
return self[0] * a[0] + self[1] * a[1]
def angle(self, a):
        '''Computes the angle between a and b, and returns the angle in
degrees.
>>> Vector(100, 0).angle((0, 100))
-90.0
>>> Vector(87, 23).angle((-77, 10))
-157.7920283010705
'''
angle = -(180 / math.pi) * math.atan2(
self[0] * a[1] - self[1] * a[0],
self[0] * a[0] + self[1] * a[1])
return angle
def rotate(self, angle):
        '''Returns a new vector rotated by an angle in degrees.
        >>> v = Vector(100, 0)
        >>> v.rotate(45)
        [70.710678118654755, 70.710678118654741]
'''
angle = math.radians(angle)
return Vector((self[0] * math.cos(angle)) - (self[1] * math.sin(angle)),
(self[1] * math.cos(angle)) + (self[0] * math.sin(angle)))
@staticmethod
def line_intersection(v1, v2, v3, v4):
'''
Finds the intersection point between the lines (1)v1->v2 and (2)v3->v4
and returns it as a vector object.
>>> a = (98, 28)
>>> b = (72, 33)
>>> c = (10, -5)
>>> d = (20, 88)
>>> Vector.line_intersection(a, b, c, d)
[15.25931928687196, 43.911669367909241]
.. warning::
This is a line intersection method, not a segment intersection.
For math see: http://en.wikipedia.org/wiki/Line-line_intersection
'''
        # linear algebra sucks... seriously!!
x1, x2, x3, x4 = float(v1[0]), float(v2[0]), float(v3[0]), float(v4[0])
y1, y2, y3, y4 = float(v1[1]), float(v2[1]), float(v3[1]), float(v4[1])
u = (x1 * y2 - y1 * x2)
v = (x3 * y4 - y3 * x4)
denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
if denom == 0:
return None
px = (u * (x3 - x4) - (x1 - x2) * v) / denom
py = (u * (y3 - y4) - (y1 - y2) * v) / denom
return Vector(px, py)
@staticmethod
def segment_intersection(v1, v2, v3, v4):
'''
Finds the intersection point between segments (1)v1->v2 and (2)v3->v4
and returns it as a vector object.
>>> a = (98, 28)
>>> b = (72, 33)
>>> c = (10, -5)
>>> d = (20, 88)
        >>> print(Vector.segment_intersection(a, b, c, d))
        None
>>> a = (0, 0)
>>> b = (10, 10)
>>> c = (0, 10)
>>> d = (10, 0)
>>> Vector.segment_intersection(a, b, c, d)
        [5.0, 5.0]
'''
#Yaaay! I love linear algebra applied within the realms of geometry.
x1, x2, x3, x4 = float(v1[0]), float(v2[0]), float(v3[0]), float(v4[0])
y1, y2, y3, y4 = float(v1[1]), float(v2[1]), float(v3[1]), float(v4[1])
#This is mostly the same as the line_intersection
u = (x1 * y2 - y1 * x2)
v = (x3 * y4 - y3 * x4)
denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
if denom == 0:
return None
px = (u * (x3 - x4) - (x1 - x2) * v) / denom
py = (u * (y3 - y4) - (y1 - y2) * v) / denom
#Here are the new bits
c1 = (x1 <= px <= x2) or (x2 <= px <= x1)
        c2 = (y1 <= py <= y2) or (y2 <= py <= y1)
c3 = (x3 <= px <= x4) or (x4 <= px <= x3)
c4 = (y3 <= py <= y4) or (y4 <= py <= y3)
if (c1 and c2) and (c3 and c4):
return Vector(px, py)
else:
return None
@staticmethod
def in_bbox(point, a, b):
        '''Return True if `point` is in the bbox defined by `a` and `b`.
>>> bmin = (0, 0)
>>> bmax = (100, 100)
>>> Vector.in_bbox((50, 50), bmin, bmax)
True
>>> Vector.in_bbox((647, -10), bmin, bmax)
False
'''
return ((point[0] <= a[0] and point[0] >= b[0] or
point[0] <= b[0] and point[0] >= a[0]) and
(point[1] <= a[1] and point[1] >= b[1] or
point[1] <= b[1] and point[1] >= a[1]))
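# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): line_intersection()
# works on the infinite lines through the points, while segment_intersection()
# additionally requires the intersection to lie on both segments.
if __name__ == '__main__':
    a, b, c, d = (98, 28), (72, 33), (10, -5), (20, 88)
    print('lines:   ', Vector.line_intersection(a, b, c, d))
    print('segments:', Vector.segment_intersection(a, b, c, d))  # None here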
|
|
# coding=utf-8
"""
License/Disclaimer
------------------
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import sys
import traceback
import matplotlib
matplotlib.use('TKagg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot
from sfc_models.models import Model
if sys.version_info[0] < 3:
    import Tkinter as tk
    from Tkinter import *
    import tkMessageBox as messagebox
    import ttk
else:
import tkinter as tk
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
class Parameters(object):
"""
Class to hold common data members that are shared by multiple frame objects.
Not all Frames will use all data.
"""
def __init__(self):
self.Model = Model()
self.ModelName = ''
self.TimeSeriesHolder = self.Model.EquationSolver.TimeSeries
self.TimeAxisVariable = 'k'
self.MinWidth = 800
self.MinHeight = 600
self.LogDir = ''
self.SourceOptions = ('Time Series', 'Initial Steady State', 'Convergence Trace')
self.LastSource = ''
self.TimeSeriesWidget = None
self.TimeAxisMinimum = None
self.TimeStart = None
self.TimeRange = None
def SetModel(self, model):
self.Model = model
self.LastSource = ''
self.SetTimeSeriesHolder()
def SetTimeSeriesHolder(self, source_str='Time Series'):
opt = source_str
if opt not in self.SourceOptions:
raise ValueError('Unknown time series source: ' + opt)
if opt == self.LastSource:
return
if opt == self.SourceOptions[0]:
holder = self.Model.EquationSolver.TimeSeries
if opt == self.SourceOptions[1]:
holder = self.Model.EquationSolver.TimeSeriesInitialSteadyState
if opt == self.SourceOptions[2]:
holder = self.Model.EquationSolver.TimeSeriesStepTrace
self.TimeSeriesHolder = holder
self.TimeAxisVariable = self.TimeSeriesHolder.TimeSeriesName
if self.TimeAxisVariable not in holder:
holder[self.TimeAxisVariable] = [0.0, 1.0]
self.TimeAxisMinimum = int(self.GetTimeSeries(self.TimeAxisVariable)[0])
self.TimeRange = 40 # None
self.TimeStart = self.TimeAxisMinimum
self.TimeSeriesList = holder.GetSeriesList()
if self.TimeSeriesWidget is not None:
self.TimeSeriesWidget.set(self.TimeSeriesList)
self.LastSource = opt
return holder
def GetTimeSeries(self, series_name):
ser = self.TimeSeriesHolder[series_name]
return ser
class WidgetHolder(object):
def __init__(self):
self.Widgets = {}
self.Data = {}
self.ListBoxType = {}
self.MatplotlibInfo = {}
def AddEntry(self, parent, name, readonly=False):
self.Data[name] = StringVar()
if readonly:
self.Widgets[name] = Entry(parent, state=['readonly',], textvariable=self.Data[name])
else:
self.Widgets[name] = Entry(parent, textvariable=self.Data[name])
def AddButton(self, parent, name, text, command, state='!disabled'):
self.Widgets[name] = ttk.Button(parent, text=text, command=command, state=state)
def AddTree(self, parent, name, columns):
self.Widgets[name] = ttk.Treeview(parent, columns=columns)
def AddListBox(self, parent, name, height=10, single_select=True, callback=None):
if single_select:
select_mode = 'browse'
else:
select_mode='extended'
self.ListBoxType[name] = select_mode
self.Data[name] = StringVar()
self.Widgets[name] = Listbox(parent, listvariable=self.Data[name], height=height,
selectmode=select_mode)
if callback is not None:
self.Widgets[name].bind('<<ListboxSelect>>', callback)
def GetListBox(self, name):
"""
If single_select: returns string or None (no selection).
If multi-select, always returns a list of strings (possibly empty).
:param name:
:return:
"""
indices = self.Widgets[name].curselection()
mlist = self.Data[name].get()
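        # The listvariable's value comes back from Tk as a single string (the
        # printed form of the sequence assigned in SetListBox); eval() turns
        # it back into a Python sequence. This assumes the listbox contents
        # were set via SetListBox and are trusted.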
mlist = eval(mlist)
if self.ListBoxType[name] == 'browse':
if len(indices) == 0:
return None
else:
return mlist[indices[0]]
else:
return [mlist[x[0]] for x in indices]
def SetListBox(self, name, value):
if type(value) == str:
if value == '':
value = []
else:
value = [value,]
used = tuple(value)
# used = value
# if len(value) == 0:
# used = ''
# elif len(value) == 1:
# used = value[0]
self.Data[name].set(used)
def DeleteTreeChildren(self, name, item_code):
treewidget = self.Widgets[name]
children = treewidget.get_children(item_code)
for child in children:
treewidget.delete(child)
def AddMatplotLib(self, parent, name):
Fig = matplotlib.figure.Figure(figsize=(7.5, 5), dpi=90)
subplot = Fig.add_subplot(111)
x = []
y = []
self.MatplotlibInfo[name+"line"], = subplot.plot(x, y, 'bo-')
self.MatplotlibInfo[name+'canvas'] = FigureCanvasTkAgg(Fig, master=parent)
def AddRadioButtons(self, parent, name, options):
self.Data[name] = StringVar()
widgies = []
for opt in options:
widgies.append(ttk.Radiobutton(parent, text=opt, variable=self.Data[name], value=opt))
self.Widgets[name] = widgies
def AddVariableLabel(self, parent, name):
self.Data[name] = StringVar()
self.Widgets[name] = tk.Label(parent, textvariable=self.Data[name])
def GetMatplotlibInfo(self, name, objectname):
        if objectname not in ('line', 'canvas'):
raise ValueError('Unknown type of object')
return self.MatplotlibInfo[name+objectname]
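# A minimal usage sketch for the matplotlib plumbing above (the `root`
# window and the 'plot1' name are hypothetical):
#
#   wh = WidgetHolder()
#   wh.AddMatplotLib(root, 'plot1')
#   line = wh.GetMatplotlibInfo('plot1', 'line')
#   canvas = wh.GetMatplotlibInfo('plot1', 'canvas')
#   canvas.get_tk_widget().grid(row=0, column=0)
#   line.set_data([0, 1, 2], [1.0, 0.5, 0.25])
#   line.axes.relim()
#   line.axes.autoscale_view()
#   canvas.draw()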
def sort_series(serlist):
"""
Sort a list of series names alphabetically, except for 'k' and 't' (at the front).
Works on a copy, and returns it. (Not an in-place sort.)
This should be moved to sfc_models, since the same code appears there.
:param serlist: list
:return:
"""
new_serlist = copy.copy(serlist)
new_serlist.sort()
if 't' in new_serlist:
new_serlist.remove('t')
new_serlist.insert(0, 't')
if 'k' in new_serlist:
new_serlist.remove('k')
new_serlist.insert(0,'k')
return new_serlist
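# Illustrative examples (assumed, not from the source):
#   sort_series(['x10', 't', 'a'])       ->  ['t', 'a', 'x10']
#   sort_series(['x10', 't', 'a', 'k'])  ->  ['k', 't', 'a', 'x10']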
def get_int(val, accept_None=True):
try:
val_n = int(val)
except:
if accept_None and val.lower() in ('none', 'na', ''):
val_n = None
else:
raise
return val_n
def get_series_info(series_name, mod):
desc = ''
eqn = ''
try:
eq = mod.FinalEquationBlock[series_name]
eqn = eq.GetRightHandSide()
desc = eq.Description
eqn_str = '{0} = {1}'.format(series_name, eqn)
except KeyError:
# k is one variable that will not be in the FinalEquationBlock
eqn_str = ''
if series_name == 'k':
desc = '[k] Time Axis'
eqn_str = 'k = k (!)'
if eqn_str == '' and series_name == 't':
eqn_str = 't = k'
desc = '[t] Automatically generated time axis; user may override as a global equation.'
if eqn_str == '' and series_name == 'iteration':
desc = 'The iteration step within the solver algorithm'
if eqn_str == '' and series_name == 'iteration_error':
desc = 'Fitting error for equations at each iteration of the solver.'
return eqn_str, desc
def ErrorDialog(ex):
msg = "Error: {0}\n\n{1}".format(str(ex), ''.join(traceback.format_exc(limit=4)))
messagebox.showinfo(message=msg, icon='error', title='Error')
|
|
from __future__ import annotations
from logging import addLevelName
import numpy as np
import ray
import riip
import scipy.special as ssp
from scipy.optimize import minimize, root
from pymwm.cutoff import Cutoff
from pymwm.utils import coax_utils, eig_mat_utils
from pymwm.waveguide import Sampling
class Samples(Sampling):
"""A class defining samples of phase constants of coaxial waveguide
modes.
Attributes:
fill: An instance of Material class for the core
clad: An instance of Material class for the clad
size: A float indicating the size of core [um].
size2: A float indicating the optional size of core [um].
params: A dict whose keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
'wl_min': A float indicating the minimum wavelength [um]
'wl_imag': A float indicating the minimum value of
abs(c / f_imag) [um] where f_imag is the imaginary part of
the frequency.
'dw': A float indicating frequency interval
[rad * c / 1um]=[2.99792458e14 rad / s].
'num_n': An integer indicating the number of orders of modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
ws: A 1D array indicating the real part of the angular frequencies
to be calculated [rad (c / 1um)]=[2.99792458e14 rad / s].
wis: A 1D array indicating the imaginary part of the angular
frequencies to be calculated [rad * (c / 1um)].
ri: A float indicating inner radius [um].
r: A float indicating outer radius [um].
co_list: A list indicating cutoffs for PEC waveguide.
"""
def __init__(
self, size: float, fill: dict, clad: dict, params: dict, size2: float
) -> None:
"""Init Samples class.
Args:
size: A float indicating the outer radius [um]
fill (dict): Parameters for riip.Material class for the core
clad (dict): Parameters for riip.Material class for the clad
params (dict): Keys and values are as follows:
'wl_max' (float): The maximum wavelength [um].
Defaults to 5.0.
'wl_min' (float): The minimum wavelength [um].
Defaults to 0.4.
'wl_imag' (float): The minimum value of abs(c / f_imag) [um]
where f_imag is the imaginary part of the frequency.
Defaults to 5.0.
'dw' (float): The frequency interval [rad c / 1um]=[2.99792458e14 rad / s].
Defaults to 1 / 64.
'num_n' (int): The number of orders of modes.
'num_m' (int): The number of modes in each order and polarization.
(num_m + 1 for TM-like mode, num_m for TE-like mode)
size2: A float indicating the inner radius [um]
"""
super().__init__(size, fill, clad, params, size2)
self.r = size
self.ri = size2
self.r_ratio = self.ri / self.r
num_n = self.params["num_n"]
num_m = self.params["num_m"]
co = Cutoff(num_n, num_m)
self.co_list = []
for n in range(num_n):
co_per_n = []
for pol, m_end in [("M", num_m + 2), ("E", num_m + 1)]:
for m in range(1, m_end):
alpha = (pol, n, m)
co_per_n.append(co(alpha, self.r_ratio))
self.co_list.append(np.array(co_per_n))
@property
def shape(self):
return "coax"
@property
def num_all(self):
return self.params["num_n"] * (2 * self.params["num_m"] + 1)
def beta2_pec(self, w, n):
"""Return squares of phase constants for a PEC waveguide
Args:
w: A complex indicating the angular frequency
            n: An integer indicating the order of the modes.
        Returns:
            h2s: A 1D array indicating squares of phase constants, whose first
                num_m + 1 elements are for TM-like modes and the rest are for
                TE-like modes.
"""
w_comp = w.real + 1j * w.imag
return self.fill(w_comp) * w_comp ** 2 - self.co_list[n] ** 2 / self.r ** 2
def x(
self, h2: complex | np.ndarray, w: complex, e1: complex
) -> complex | np.ndarray:
val: complex | np.ndarray = (
(1 + 1j) * np.sqrt(-0.5j * (e1 * w ** 2 - h2)) * self.ri
)
# val: complex | np.ndarray = np.sqrt(e1 * w ** 2 - h2 + 0j) * self.ri
return val
def y(
self, h2: complex | np.ndarray, w: complex, e2: complex
) -> complex | np.ndarray:
val: complex | np.ndarray = (
(1 - 1j) * np.sqrt(0.5j * (h2 - e2 * w ** 2)) * self.ri
)
# val: complex | np.ndarray = np.sqrt(h2 - e2 * w ** 2 + 0j) * self.ri
return val
def u(
self,
h2: complex | np.ndarray,
w: complex | np.ndarray,
e1: complex | np.ndarray,
) -> complex | np.ndarray:
val: complex | np.ndarray = (
(1 + 1j) * np.sqrt(-0.5j * (e1 * w ** 2 - h2)) * self.r
)
# val: complex | np.ndarray = np.sqrt(e1 * w ** 2 - h2 + 0j) * self.r
return val
def v(
self,
h2: complex | np.ndarray,
w: complex | np.ndarray,
e2: complex | np.ndarray,
) -> complex | np.ndarray:
val: complex | np.ndarray = (
(1 - 1j) * np.sqrt(0.5j * (h2 - e2 * w ** 2)) * self.r
)
# val: complex | np.ndarray = np.sqrt(h2 - e2 * w ** 2 + 0j) * self.r
return val
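    # Branch-cut note for x, y, u and v above (an interpretation, not from
    # the source): (1 + 1j) * np.sqrt(-0.5j * z) equals sqrt(z) with the
    # branch cut moved from the negative real axis to the negative imaginary
    # axis of z, presumably to pick the root with decaying behaviour; the
    # commented-out np.sqrt(... + 0j) lines are the principal-branch
    # alternatives. E.g. for z = -4 - 0.1j:
    #   np.sqrt(z + 0j)                # ~ +0.025 - 2.0j (principal branch)
    #   (1 + 1j) * np.sqrt(-0.5j * z)  # ~ -0.025 + 2.0j (rotated cut)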
def eig_mat(
self, h2: complex, w: complex, pol: str, n: int, e1: complex, e2: complex
) -> tuple[np.ndarray, np.ndarray]:
"""Return matrix of characteristic equation
Args:
h2: The square of the phase constant.
w: The angular frequency
pol: The polarization
n: The order of the modes
e1: The permittivity of the core
e2: The permittivity of the clad.
Returns:
            (a, b) (tuple[np.ndarray, np.ndarray]): a: matrix of characteristic equation and b: its derivative.
"""
h2comp = h2.real + 1j * h2.imag
w2 = w ** 2
hew = h2comp / e2 / w2
ee = e1 / e2
x = self.x(h2comp, w, e1)
y = self.y(h2comp, w, e2)
u = self.u(h2comp, w, e1)
v = self.v(h2comp, w, e2)
if u.imag > 0:
sign_u = 1
elif u.imag == 0:
sign_u = 0
else:
sign_u = -1
if v.real > 0:
sign_v = 1
elif v.real == 0:
sign_v = 0
else:
sign_v = -1
ph_u = np.exp(1j * sign_u * u.real)
ju = ssp.jve(n, u) * ph_u
jpu = -ssp.jve(n + 1, u) * ph_u + n / u * ju
jppu = -jpu / u - (1 - n ** 2 / u ** 2) * ju
yu = ssp.yve(n, u) * ph_u
ypu = -ssp.yve(n + 1, u) * ph_u + n / u * yu
yppu = -ypu / u - (1 - n ** 2 / u ** 2) * yu
kv = ssp.kve(n, v)
kpv = -ssp.kve(n + 1, v) + n / v * kv
kppv = -kpv / v + (1 + n ** 2 / v ** 2) * kv
ph_x = np.exp(1j * sign_u * x.real)
jx = ssp.jve(n, x) * ph_x
jpx = -ssp.jve(n + 1, x) * ph_x + n / x * jx
jppx = -jpx / x - (1 - n ** 2 / x ** 2) * jx
yx = ssp.yve(n, x) * ph_x
ypx = -ssp.yve(n + 1, x) * ph_x + n / x * yx
yppx = -ypx / x - (1 - n ** 2 / x ** 2) * yx
ph_y = np.exp(-1j * sign_v * y.imag)
iy = ssp.ive(n, y) * ph_y
ipy = ssp.ive(n + 1, y) * ph_y + n / y * iy
ippy = -ipy / y + (1 + n ** 2 / y ** 2) * iy
du_dh2 = -self.r ** 2 / (2 * u)
dv_dh2 = self.r ** 2 / (2 * v)
dx_dh2 = -self.ri ** 2 / (2 * x)
dy_dh2 = self.ri ** 2 / (2 * y)
nuv = n * (v / u + u / v)
dnuv_du = n * (-v / u ** 2 + 1 / v)
dnuv_dv = n * (-u / v ** 2 + 1 / u)
nxy = n * (y / x + x / y)
dnxy_dx = n * (-y / x ** 2 + 1 / y)
dnxy_dy = n * (-x / y ** 2 + 1 / x)
a = np.array(
[
[
jpu * kv * v + kpv * ju * u,
ypu * kv * v + kpv * yu * u,
nuv * ju * kv,
nuv * yu * kv,
],
[
jpx / yx * y + ipy / iy * jx / yx * x,
ypx / yx * y + ipy / iy * x,
nxy * jx / yx,
nxy,
],
[
hew * nuv * ju * kv,
hew * nuv * yu * kv,
ee * jpu * kv * v + kpv * ju * u,
ee * ypu * kv * v + kpv * yu * u,
],
[
hew * nxy * jx / yx,
hew * nxy,
ee * jpx / yx * y + ipy / iy * jx / yx * x,
ee * ypx / yx * y + ipy / iy * x,
],
]
)
da_du = np.array(
[
[
jppu * kv * v + kpv * (jpu * u + ju) + 1j * sign_u * a[0, 0],
yppu * kv * v + kpv * (ypu * u + yu) + 1j * sign_u * a[0, 1],
dnuv_du * ju * kv + nuv * jpu * kv + 1j * sign_u * a[0, 2],
dnuv_du * yu * kv + nuv * ypu * kv + 1j * sign_u * a[0, 3],
],
[0, 0, 0, 0],
[
hew * (dnuv_du * ju + nuv * jpu) * kv + 1j * sign_u * a[2, 0],
hew * (dnuv_du * yu + nuv * ypu) * kv + 1j * sign_u * a[2, 1],
ee * jppu * kv * v + kpv * (jpu * u + ju) + 1j * sign_u * a[2, 2],
ee * yppu * kv * v + kpv * (ypu * u + yu) + 1j * sign_u * a[2, 3],
],
[0, 0, 0, 0],
]
)
da_dv = np.array(
[
[
jpu * (kpv * v + kv) + kppv * ju * u + a[0, 0],
ypu * (kpv * v + kv) + kppv * yu * u + a[0, 1],
(dnuv_dv * kv + nuv * kpv) * ju + a[0, 2],
(dnuv_dv * kv + nuv * kpv) * yu + a[0, 3],
],
[0, 0, 0, 0],
[
hew * (dnuv_dv * kv + nuv * kpv) * ju + a[2, 0],
hew * (dnuv_dv * kv + nuv * kpv) * yu + a[2, 1],
ee * jpu * (kpv * v + kv) + kppv * ju * u + a[2, 2],
ee * ypu * (kpv * v + kv) + kppv * yu * u + a[2, 3],
],
[0, 0, 0, 0],
]
)
da_dx = np.array(
[
[0, 0, 0, 0],
[
(
(jppx / yx - jpx * ypx / yx ** 2) * y
+ ipy / iy * ((jpx / yx - jx * ypx / yx ** 2) * x + jx / yx)
),
(yppx / yx - ypx ** 2 / yx ** 2) * y + ipy / iy,
dnxy_dx * jx / yx + nxy * jpx / yx - nxy * jx * ypx / yx ** 2,
dnxy_dx,
],
[0, 0, 0, 0],
[
hew
* (dnxy_dx * jx / yx + nxy * jpx / yx - nxy * jx * ypx / yx ** 2),
hew * dnxy_dx,
(
ee * (jppx / yx - jpx * ypx / yx ** 2) * y
+ ipy / iy * ((jpx / yx - jx * ypx / yx ** 2) * x + jx / yx)
),
ee * (yppx / yx - ypx ** 2 / yx ** 2) * y + ipy / iy,
],
]
)
da_dy = np.array(
[
[0, 0, 0, 0],
[
jpx / yx + (ippy / iy - ipy ** 2 / iy ** 2) * jx / yx * x,
ypx / yx + (ippy / iy - ipy ** 2 / iy ** 2) * x,
dnxy_dy * jx / yx,
dnxy_dy,
],
[0, 0, 0, 0],
[
hew * dnxy_dy * jx / yx,
hew * dnxy_dy,
ee * jpx / yx + (ippy / iy - ipy ** 2 / iy ** 2) * jx / yx * x,
ee * ypx / yx + (ippy / iy - ipy ** 2 / iy ** 2) * x,
],
]
)
b = (
da_du * du_dh2
+ da_dv * dv_dh2
+ da_dx * dx_dh2
+ da_dy * dy_dh2
+ np.array(
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[
1.0 / (e2 * w2) * nuv * ju * kv,
1.0 / (e2 * w2) * nuv * yu * kv,
0,
0,
],
[
1.0 / (e2 * w2) * nxy * jx / yx,
1.0 / (e2 * w2) * nxy,
0,
0,
],
]
)
)
return a, b
def eig_eq(
self,
h2vec: np.ndarray,
w: complex,
pol: str,
n: int,
e1: complex,
e2: complex,
roots: np.ndarray,
) -> tuple[np.ndarray, np.ndarray]:
"""Return the value of the characteristic equation
Args:
h2vec: The real and imaginary parts of the square of propagation constant.
w: The angular frequency
pol: The polarization
n: The order of the modes
e1: The permittivity of the core
            e2: The permittivity of the clad.
            roots: Previously-found roots, which are deflated (divided out)
                of the characteristic function.
        Returns:
            (f, jac): f, a 1D array [Re(f), Im(f)] of the value of the
                characteristic function, and jac, its 2-by-2 Jacobian with
                respect to (Re(h2), Im(h2)).
        """
num = len(roots)
h2 = h2vec[0] + h2vec[1] * 1j
a, b = self.eig_mat(h2, w, pol, n, e1, e2)
if n == 0:
if pol == "E":
f = a[0, 0] * a[1, 1] - a[0, 1] * a[1, 0]
fp = eig_mat_utils.deriv_det2_cython(
np.ascontiguousarray(a[:2, :2]), np.ascontiguousarray(b[:2, :2])
)
else:
f = a[2, 2] * a[3, 3] - a[2, 3] * a[3, 2]
fp = eig_mat_utils.deriv_det2_cython(
np.ascontiguousarray(a[2:, 2:]), np.ascontiguousarray(b[2:, 2:])
)
else:
f = np.linalg.det(a)
fp = eig_mat_utils.deriv_det4_cython(a, b)
denom = 1.0
dd = 0.0
for i in range(num):
denom *= (h2 - roots[i]) / roots[i]
ddi = -roots[i] / (h2 - roots[i]) ** 2
for j in range(num):
if j != i:
ddi /= (h2 - roots[j]) / roots[j]
dd += ddi
fp = fp / denom + f * dd
f /= denom
return np.array([f.real, f.imag]), np.array(
[[fp.real, fp.imag], [-fp.imag, fp.real]]
)
def beta2(
self, w: complex, n: int, e1: complex, e2: complex, xis: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
"""Return roots and convergences of the characteristic equation
Args:
w (complex): Angular frequency.
n (int): Order of the mode
            e1 (complex): Permittivity of the core.
            e2 (complex): Permittivity of the clad.
xis (np.ndarray): Initial approximations for the roots
whose number of elements is 2*num_m+1.
Returns:
xs: A 1D array indicating the roots, whose length is 2*num_m+1.
success: A 1D array indicating the convergence information for xs.
"""
if self.clad.label == "PEC":
xs = self.beta2_pec(w, n)
return xs, np.ones_like(xs, dtype=bool)
num_m = self.params["num_m"]
roots: list[complex] = []
vals = []
success: list[bool] = []
for i, xi in enumerate(xis):
if i < num_m + 1:
pol = "M"
else:
pol = "E"
if n == 0 and i == num_m + 1:
roots = []
args = (w, pol, n, e1, e2, self.r, self.ri, np.array(roots, dtype=complex))
result = root(
coax_utils.eig_eq_with_jac,
np.array([xi.real, xi.imag]),
args=args,
jac=True,
method="hybr",
options={"col_deriv": True},
)
x = result.x[0] + result.x[1] * 1j
if result.success:
roots.append(x)
success.append(result.success)
vals.append(x)
return np.array(vals), np.array(success)
@staticmethod
def beta_from_beta2(x):
return (1 + 1j) * np.sqrt(-0.5j * x)
# return np.sqrt(x)
# return 1j * np.sqrt(-x)
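    # Illustrative values for beta_from_beta2 (using the same rotated branch
    # cut as x, y, u and v): a propagating mode with beta2 = 4.0 gives
    # beta ~ 2.0, while an evanescent mode with beta2 = -4.0 gives
    # beta ~ 2.0j.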
def beta2_w_min(self, n):
"""Return roots and convergences of the characteristic equation at
the lowest angular frequency, ws[0].
Args:
            n: An integer indicating the order of the mode
Returns:
xs: A 1D array indicating the roots, whose length is 2*num_m+1.
success: A 1D array indicating the convergence information for xs.
"""
if self.clad.label == "PEC":
xs = self.beta2_pec(self.ws[0], n)
success = np.ones_like(xs, dtype=bool)
return xs, success
w_0 = 2 * np.pi / 10.0
e1 = self.fill(w_0)
e2_0 = self.clad(w_0) * 1000
de2 = (self.clad(w_0) - e2_0) / 5000
xs, success = self.beta2(w_0, n, e1, e2_0, self.beta2_pec(w_0, n))
xs0 = xs1 = xs
success = np.ones_like(xs0, dtype=bool)
for i in range(5001):
e2 = e2_0 + de2 * i
xis = 2 * xs1 - xs0
xs, success = self.beta2(w_0, n, e1, e2, xis)
xs = np.where(success, xs, xs1)
xs0 = xs1
xs1 = xs
xs0 = xs1
dw = (self.ws[0] - w_0) / 500
for i in range(501):
w = w_0 + dw * i
e1 = self.fill(w)
e2 = self.clad(w)
xis = 2 * xs1 - xs0
xs, success = self.beta2(w, n, e1, e2, xis)
xs = np.where(success, xs, xs1)
xs0 = xs1
xs1 = xs
return xs, success
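    # Reading of the loop above (explanatory note, not from the source): this
    # is a continuation scheme. It starts at w_0 = 2*pi/10 in a quasi-PEC
    # limit (clad permittivity multiplied by 1000), walks e2 back to its true
    # value in 5000 steps, then walks the frequency from w_0 up to ws[0] in
    # 500 steps. At each step xis = 2*xs1 - xs0 linearly extrapolates the two
    # previous solutions as the initial guess refined by self.beta2.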
def betas_convs(self, xs_success_list: list) -> tuple[dict, dict]:
num_n = self.params["num_n"]
num_m = self.params["num_m"]
betas = {}
convs = {}
for n in range(num_n):
xs_array, success_array = xs_success_list[n]
for m in range(1, num_m + 2):
betas[("M", n, m)] = np.zeros(
(len(self.ws), len(self.wis)), dtype=complex
)
convs[("M", n, m)] = np.zeros((len(self.ws), len(self.wis)), dtype=bool)
for m in range(1, num_m + 1):
betas[("E", n, m)] = np.zeros(
(len(self.ws), len(self.wis)), dtype=complex
)
convs[("E", n, m)] = np.zeros((len(self.ws), len(self.wis)), dtype=bool)
for iwi in range(len(self.wis)):
for iwr in range(len(self.ws)):
for i in range(num_m + 1):
x = xs_array[iwr, iwi][i]
betas[("M", n, i + 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("M", n, i + 1)][iwr, iwi] = success_array[iwr, iwi][i]
for i in range(num_m):
x = xs_array[iwr, iwi][i + num_m + 1]
betas[("E", n, i + 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("E", n, i + 1)][iwr, iwi] = success_array[iwr, iwi][
i + num_m + 1
]
return betas, convs
def beta2_adaptive(
self,
real_or_imag: str,
w: complex,
n: int,
w0: complex,
x0s: np.ndarray,
dx0=0.01,
dw_min=2 ** (-24),
) -> tuple[np.ndarray, np.ndarray]:
"""Return roots and convergences of the characteristic equation
Args:
real_or_imag (str): "real" or "imag".
w0 (complex): Angular frequency of previous step.
w (complex): Angular frequency.
n (int): Order of the mode
x0s (np.ndarray): Initial approximations for the roots
whose number of elements is 2*num_m+1.
Returns:
xs: A 1D array indicating the roots, whose length is 2*num_m+1.
success: A 1D array indicating the convergence information for xs.
"""
if self.clad.label == "PEC":
xs = self.beta2_pec(w, n)
return xs, np.ones_like(xs, dtype=bool)
def func(_w, _x0s) -> tuple[np.ndarray, np.ndarray]:
num_m = self.params["num_m"]
roots: list[complex] = []
vals = []
successes: list[bool] = []
for i, _x0 in enumerate(_x0s):
if i < num_m + 1:
pol = "M"
else:
pol = "E"
if n == 0 and i == num_m + 1:
roots = []
args = (
_w,
pol,
n,
self.fill(_w),
self.clad(_w),
self.r,
self.ri,
np.array(roots, dtype=complex),
)
result = root(
coax_utils.eig_eq_with_jac,
np.array([_x0.real, _x0.imag]),
args=args,
jac=True,
method="hybr",
options={"col_deriv": True},
)
x = result.x[0] + 1j * result.x[1]
success = result.success
if not success and np.hypot(*result.fun) < 1e-8:
success = True
if success:
roots.append(x)
vals.append(x)
successes.append(success)
return np.array(vals), np.array(successes)
_w0, _x0s = w0, x0s
_w1 = w
_x1s, successes = func(_w1, x0s)
while True:
if not np.all(successes):
factor = 0
else:
factor = min(dx0 / np.abs(_x1s - _x0s).max(), 2)
dw = max(abs(_w1 - _w0) * factor, dw_min)
if real_or_imag == "real":
_w = _w0 + dw
else:
_w = _w0 - 1j * dw
_xis = _x0s + (_x1s - _x0s) * factor
xs, successes = func(_w, _xis)
_w0, _x0s = _w1, _x1s
_w1, _x1s = _w, xs
if real_or_imag == "real":
if _w1.real > w.real:
break
else:
if _w1.imag < w.imag:
break
return func(w, _x1s)
def __call__(self, n: int):
"""Return a dict of the roots of the characteristic equation
Args:
n: A integer indicating the order of the mode
Returns:
betas: A dict containing arrays of roots, whose key is as follows:
(pol, n, m):
pol: 'E' or 'M' indicating the polarization.
n: A integer indicating the order of the mode.
m: A integer indicating the ordinal of the mode in the same
order.
convs: A dict containing the convergence information for betas,
whose key is the same as above.
"""
num_m = self.params["num_m"]
xs_array = np.zeros((len(self.ws), len(self.wis), 2 * num_m + 1), dtype=complex)
success_array = np.zeros(
(len(self.ws), len(self.wis), 2 * num_m + 1), dtype=bool
)
iwr = iwi = 0
wi = self.wis[iwi]
xis, success = self.beta2_w_min(n)
xs_array[iwr, iwi] = xis
success_array[iwr, iwi] = success
xs0 = xs1 = xis
for iwr in range(1, len(self.ws)):
wr = self.ws[iwr]
w = wr + 1j * wi
e1 = self.fill(w)
e2 = self.clad(w)
xis = 2 * xs1 - xs0
xs, success = self.beta2(w, n, e1, e2, xis)
xs = np.where(success, xs, xis)
xs_array[iwr, iwi] = xs
success_array[iwr, iwi] = success
xs0 = xs1
xs1 = xs
for iwr in range(len(self.ws)):
wr = self.ws[iwr]
xs0 = xs1 = xis = xs_array[iwr, 0]
for iwi in range(1, len(self.wis)):
wi = self.wis[iwi]
w = wr + 1j * wi
e1 = self.fill(w)
e2 = self.clad(w)
xis = 2 * xs1 - xs0
xs, success = self.beta2(w, n, e1, e2, xis)
xs = np.where(success, xs, xis)
xs_array[iwr, iwi] = xs
success_array[iwr, iwi] = success
xs0 = xs1
xs1 = xs
return xs_array, success_array
def wr_sampling(self, n: int) -> tuple[np.ndarray, np.ndarray]:
num_m = self.params["num_m"]
xs_array = np.zeros((len(self.ws), 2 * num_m + 1), dtype=complex)
success_array = np.zeros((len(self.ws), 2 * num_m + 1), dtype=bool)
iwr = 0
x0s, success = self.beta2_w_min(n)
xs_array[iwr] = x0s
success_array[iwr] = success
for iwr in range(1, len(self.ws)):
w = self.ws[iwr]
# print(iwr, w)
w0 = self.ws[iwr - 1]
xs, success = self.beta2_adaptive("real", w, n, w0, x0s)
xs_array[iwr] = xs
success_array[iwr] = success
x0s = xs
return xs_array, success_array
def wi_sampling(
self, args: tuple[int, int, np.ndarray, np.ndarray]
) -> tuple[np.ndarray, np.ndarray]:
n, iwr, x0s, success = args
num_m = self.params["num_m"]
xs_array = np.zeros((len(self.wis), 2 * num_m + 1), dtype=complex)
success_array = np.zeros((len(self.wis), 2 * num_m + 1), dtype=bool)
wr = self.ws[iwr]
iwi = 0
xs_array[iwi] = x0s
success_array[iwi] = success
for iwi in range(1, len(self.wis)):
wi = self.wis[iwi]
# print(iwi, wi)
wi0 = self.wis[iwi - 1]
xs, success = self.beta2_adaptive(
"imag", wr + 1j * wi, n, wr + 1j * wi0, x0s
)
xs_array[iwi] = xs
success_array[iwi] = success
x0s = xs
return xs_array, success_array
class SamplesLowLoss(Samples):
"""A class defining samples of phase constants of coaxial waveguide
modes in a virtual low-loss clad waveguide by subclassing the Samples class.
Attributes:
fill: An instance of Material class for the core
clad: An instance of Material class for the clad
size: A float indicating the size of core [um].
size2: A float indicating the optional size of core [um].
params: A dict whose keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
'wl_min': A float indicating the minimum wavelength [um]
'wl_imag': A float indicating the minimum value of
abs(c / f_imag) [um] where f_imag is the imaginary part of
the frequency.
'dw': A float indicating frequency interval
[rad * c / 1um]=[2.99792458e14 rad / s].
'num_n': An integer indicating the number of orders of modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
ws: A 1D array indicating the real part of the angular frequencies
to be calculated [rad (c / 1um)]=[2.99792458e14 rad / s].
wis: A 1D array indicating the imaginary part of the angular
frequencies to be calculated [rad * (c / 1um)].
ri: A float indicating inner radius [um].
r: A float indicating outer radius [um].
co_list: A list indicating cutoffs for PEC waveguide.
"""
def __init__(self, size: float, fill: dict, clad: dict, params: dict, size2):
"""Init Samples class.
size (float): The radius of the cross section [um]
fill (dict): Parameters for riip.Material class for the core
clad (dict): Parameters for riip.Material class for the clad
params (dict): Keys and values are as follows:
'wl_max' (float): The maximum wavelength [um].
Defaults to 5.0.
'wl_min' (float): The minimum wavelength [um].
Defaults to 0.4.
'wl_imag' (float): The minimum value of abs(c / f_imag) [um]
where f_imag is the imaginary part of the frequency.
Defaults to 5.0.
'dw' (float): The frequency interval [rad c / 1um]=[2.99792458e14 rad / s].
Defaults to 1 / 64.
'num_n' (int): The number of orders of modes.
'num_m' (int): The number of modes in each order and polarization.
"""
super().__init__(size, fill, clad, params, size2)
def betas_convs(self, xs_success_list):
num_iwr = len(self.ws)
num_iwi = len(self.wis)
num_n = self.params["num_n"]
num_m = self.params["num_m"]
betas = {}
convs = {}
for n in range(num_n):
for m in range(1, num_m + 2):
betas[("M", n, m)] = np.zeros(
(len(self.ws), len(self.wis)), dtype=complex
)
convs[("M", n, m)] = np.zeros((len(self.ws), len(self.wis)), dtype=bool)
for m in range(1, num_m + 1):
betas[("E", n, m)] = np.zeros(
(len(self.ws), len(self.wis)), dtype=complex
)
convs[("E", n, m)] = np.zeros((len(self.ws), len(self.wis)), dtype=bool)
for iwr in range(num_iwr):
for iwi in range(num_iwi):
j = iwr * num_iwi + iwi
w = self.ws[iwr] + 1j * self.wis[iwi]
e2 = self.clad(w)
for n in range(num_n):
for i in range(num_m + 1):
x = xs_success_list[j][0][n][i]
v = self.v(x, w, e2)
betas[("M", n, i + 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("M", n, i + 1)][iwr, iwi] = (
xs_success_list[j][1][n][i]
if v.real > abs(v.imag)
else False
)
for i in range(num_m):
x = xs_success_list[j][0][n][i + num_m + 1]
v = self.v(x, w, e2)
betas[("E", n, i + 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("E", n, i + 1)][iwr, iwi] = (
xs_success_list[j][1][n][i + num_m + 1]
if v.real > abs(v.imag)
else False
)
return betas, convs
@ray.remote
class SamplesForRay(Samples):
"""A derived class in order to create ray actor."""
def __init__(self, size: float, fill: dict, clad: dict, params: dict, size2: float):
super().__init__(size, fill, clad, params, size2)
@ray.remote
class SamplesLowLossForRay(SamplesLowLoss):
"""A derived class in order to create ray actor."""
def __init__(self, size: float, fill: dict, clad: dict, params: dict, size2: float):
super().__init__(size, fill, clad, params, size2)
def task(self, arg: tuple[int, int, list[np.ndarray]]):
"""Return a dict of the roots of the characteristic equation
Args:
arg: (iwr, iwi, xis_list)
iwr: The ordinal of the Re(w).
iwi: The ordinal of the Im(w).
                xis_list: A list of num_n arrays of initial root guesses, each of length 2*num_m+1
Returns:
xs_list: A list of num_n 1D arrays indicating the roots, whose
length is 2*num_m+1
success_list: A list of num_n 1D arrays indicating the convergence
information for xs, whose length is 2*num_m+1
"""
iwr, iwi, xis_list = arg
im_factor = self.clad.im_factor
self.clad.im_factor = 1.0
wr = self.ws[iwr]
wi = self.wis[iwi]
w = wr + 1j * wi
e1 = self.fill(w)
xs_list = []
success_list = []
for n, x0s in enumerate(xis_list):
xis = xs = x0s
success = np.ones_like(xs, dtype=bool)
for i in range(1, 8):
self.clad.im_factor = 0.5 ** i
if i == 7 or self.clad.im_factor < im_factor:
self.clad.im_factor = im_factor
e2 = self.clad(w)
xs, success = self.beta2(w, n, e1, e2, xis)
for _, ok in enumerate(success):
if not ok:
xs[_] = xis[_]
xis = xs
xs_list.append(xs)
success_list.append(success)
return xs_list, success_list
|
|
from __future__ import absolute_import
import time
import re
import six
from datetime import datetime, timedelta
from django.conf import settings
from django.db import models
from django.db.models.loading import get_model
from .fields import JSONField
from .utils import setting
AUTH_USER_MODEL = settings.AUTH_USER_MODEL
UID_LENGTH = setting('SOCIAL_AUTH_UID_LENGTH', 255)
NONCE_SERVER_URL_LENGTH = setting('SOCIAL_AUTH_NONCE_SERVER_URL_LENGTH', 255)
ASSOCIATION_SERVER_URL_LENGTH = setting(
'SOCIAL_AUTH_ASSOCIATION_SERVER_URL_LENGTH',
255
)
ASSOCIATION_HANDLE_LENGTH = setting(
'SOCIAL_AUTH_ASSOCIATION_HANDLE_LENGTH',
255
)
CLEAN_USERNAME_REGEX = re.compile(r'[^\w.@+-_]+', re.UNICODE)
class UserSocialAuth(models.Model):
"""Social Auth association model"""
user = models.ForeignKey(AUTH_USER_MODEL, related_name='social_auth')
provider = models.CharField(max_length=32)
uid = models.CharField(max_length=UID_LENGTH)
extra_data = JSONField(default='{}')
class Meta:
"""Meta data"""
unique_together = ('provider', 'uid', 'user')
app_label = 'social_auth'
def __unicode__(self):
"""Return associated user unicode representation"""
return u'%s - %s' % (six.text_type(self.user), self.provider.title())
def get_backend(self):
# Make import here to avoid recursive imports :-/
from social_auth.backends import get_backends
return get_backends().get(self.provider)
@property
def tokens(self):
"""Return access_token stored in extra_data or None"""
backend = self.get_backend()
if backend:
return backend.AUTH_BACKEND.tokens(self)
else:
return {}
def revoke_token(self, drop_token=True):
"""Attempts to revoke permissions for provider."""
if 'access_token' in self.tokens:
success = self.get_backend().revoke_token(
self.tokens['access_token'],
self.uid
)
if success and drop_token:
self.extra_data.pop('access_token', None)
self.save()
def refresh_token(self):
refresh_token = self.extra_data.get('refresh_token')
if refresh_token:
backend = self.get_backend()
if hasattr(backend, 'refresh_token'):
response = backend.refresh_token(refresh_token)
new_access_token = response.get('access_token')
                # We didn't get a new access token, so keep the
                # existing one.
if not new_access_token:
return
self.extra_data['access_token'] = new_access_token
# New refresh token might be given.
new_refresh_token = response.get('refresh_token')
if new_refresh_token:
self.extra_data['refresh_token'] = new_refresh_token
self.save()
def expiration_datetime(self):
"""Return provider session live seconds. Returns a timedelta ready to
use with session.set_expiry().
If provider returns a timestamp instead of session seconds to live, the
timedelta is inferred from current time (using UTC timezone). None is
returned if there's no value stored or it's invalid.
"""
if self.extra_data and 'expires' in self.extra_data:
try:
expires = int(self.extra_data['expires'])
except (ValueError, TypeError):
return None
now = datetime.utcnow()
            # Detect if expires is a timestamp
            if expires > time.mktime(now.timetuple()):
                # expires is a POSIX timestamp
                return datetime.fromtimestamp(expires) - now
            else:
                # expires is a number of seconds to live
                return timedelta(seconds=expires)
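    # Illustrative behaviour (hypothetical data): with
    # extra_data = {'expires': 3600} this returns timedelta(seconds=3600),
    # while a POSIX timestamp such as {'expires': 4102444800} yields the
    # time remaining until that moment.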
@classmethod
def clean_username(cls, value):
return CLEAN_USERNAME_REGEX.sub('', value)
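    # Illustrative example: clean_username drops characters rejected by
    # CLEAN_USERNAME_REGEX, e.g. u'jo hn!doe' -> u'johndoe'. Note that the
    # '+-_' in the pattern is a character range, so e.g. ',' and ';' survive.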
@classmethod
def user_username(cls, user):
if hasattr(user, 'USERNAME_FIELD'):
            # Django 1.5 custom user model, 'username' is just for internal
            # use, doesn't imply that the model should have a username field
field_name = user.USERNAME_FIELD
else:
field_name = 'username'
return getattr(user, field_name)
@classmethod
def username_field(cls, values):
user_model = cls.user_model()
if hasattr(user_model, 'USERNAME_FIELD'):
            # Django 1.5 custom user model, 'username' is just for internal
            # use, doesn't imply that the model should have a username field
values[user_model.USERNAME_FIELD] = values.pop('username')
return values
@classmethod
def simple_user_exists(cls, *args, **kwargs):
"""
Return True/False if a User instance exists with the given arguments.
Arguments are directly passed to filter() manager method.
TODO: consider how to ensure case-insensitive email matching
"""
kwargs = cls.username_field(kwargs)
return cls.user_model().objects.filter(*args, **kwargs).exists()
@classmethod
def create_user(cls, *args, **kwargs):
kwargs = cls.username_field(kwargs)
return cls.user_model().objects.create_user(*args, **kwargs)
@classmethod
def get_user(cls, pk):
try:
return cls.user_model().objects.get(pk=pk)
except cls.user_model().DoesNotExist:
return None
@classmethod
def get_user_by_email(cls, email):
"""Case insensitive search"""
        # Do a case-insensitive match, since real-world email addresses are
        # case-insensitive.
return cls.user_model().objects.get(email__iexact=email)
@classmethod
def resolve_user_or_id(cls, user_or_id):
if isinstance(user_or_id, cls.user_model()):
return user_or_id
return cls.user_model().objects.get(pk=user_or_id)
@classmethod
def get_social_auth_for_user(cls, user):
return user.social_auth.all()
@classmethod
def create_social_auth(cls, user, uid, provider):
if not isinstance(uid, six.string_types):
uid = six.text_type(uid)
return cls.objects.create(user=user, uid=uid, provider=provider)
@classmethod
def get_social_auth(cls, provider, uid, user):
try:
instance = cls.objects.get(
provider=provider,
uid=uid,
user=user,
)
instance.user = user
return instance
except UserSocialAuth.DoesNotExist:
return None
@classmethod
def username_max_length(cls):
return cls._field_length('USERNAME_FIELD', 'username')
@classmethod
def email_max_length(cls):
return cls._field_length('EMAIL_FIELD', 'email')
    @classmethod
    def _field_length(cls, setting_name, default_name):
        model = cls.user_model()
        field_name = getattr(model, setting_name, default_name)
        return model._meta.get_field(field_name).max_length
@classmethod
def user_model(cls):
return get_model(*AUTH_USER_MODEL.split('.'))
|
|
import os
import requests
import smtplib
import pyexcel
import pyexcel.ext.xlsx
from email import Encoders
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEImage import MIMEImage
from django.conf import settings
from django.db import transaction
from controller.models import Cadet, Parent, Session, Funds, GeneralSettings
def mail(to, subject, text, attach=None):
global_configuration_obj = GeneralSettings.objects.get()
gmail_user = global_configuration_obj.configuration['MailBox']['Email']
gmail_pwd = global_configuration_obj.configuration['MailBox']['Password']
msg = MIMEMultipart()
msg['From'] = gmail_user
msg['To'] = to
msg['Subject'] = subject
msg.attach(MIMEText(text, 'html'))
# logo part
fp = open(settings.MEDIA_ROOT + 'files_library/logo.png', 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
msgImage.add_header('Content-ID', '<logo>')
msg.attach(msgImage)
if attach:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attach, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attach))
msg.attach(part)
mailServer = smtplib.SMTP("smtp.gmail.com", 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmail_user, gmail_pwd)
mailServer.sendmail(gmail_user, to, msg.as_string())
# Should be mailServer.quit(), but that crashes...
mailServer.close()
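# A minimal usage sketch for mail() (address and path are hypothetical);
# the HTML body may reference the embedded logo via its Content-ID:
#   mail('parent@example.com', 'Camp registration',
#        '<p>Hello <img src="cid:logo"></p>',
#        attach='/tmp/receipt.xlsx')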
@transaction.non_atomic_requests
def register_cadets(file_path, msg=None):
    msg = dict(status='UNKNOWN', Error=[], count=0) if not msg else msg
excel_field_mappings = {'Participant':
['Participant: Name', 'Participant: Age as of today', 'Participant: Gender',
'Participant: Address', 'Participant: Home phone number',
'Participant: Age as of session', 'Participant: City', 'Participant: Country',
'Participant: Date of birth', 'Participant: Email address',
'Participant: State', 'Participant: USAC Training Program', 'Participant: Zip code',
'Participant: Please explain what you would like to have your son or daugher accomplish while at camp? Explain any special situations or other information the staff should know about your child.'],
'Primary P/G':
['Primary P/G: Name', 'Primary P/G: Business phone number',
'Primary P/G: Cell phone number', 'Primary P/G: Email address',
'Primary P/G: Gender', 'Primary P/G: Home phone number', 'Initial_Funds'],
'Secondary P/G':
['Secondary P/G: Business phone number', 'Secondary P/G: Cell phone number',
'Secondary P/G: Email address', 'Secondary P/G: Gender',
'Secondary P/G: Home phone number', 'Secondary P/G: Name'],
'Session':
['Session name', 'Session end date', 'Session location',
'Session start date', 'Session type']
}
try:
sheet = pyexcel.get_sheet(file_path)
sheet_values = sheet.to_array()
if len(sheet_values) > 0:
excel_field_mappings_copy = {}
named_column = sheet_values[0]
for key, desired_col_names in dict(excel_field_mappings).items():
for idx, col_name in enumerate(named_column):
if col_name in desired_col_names:
excel_field_mappings_copy.update({idx: col_name})
row_values = sheet_values[1:]
for row in row_values:
field_dict = {}
for field_id, field_value in enumerate(row):
try:
field_dict[
excel_field_mappings_copy[field_id]] = field_value
except KeyError:
pass
if not any(field_dict.values()):
continue
print "creating parents"
try:
lookup = {'email_address': field_dict.get(
'Primary P/G: Email address', '')}
primary_parent_obj = Parent.objects.get(**lookup)
pp_status = True
except Parent.DoesNotExist:
primary_parent_obj = Parent()
pp_status = primary_parent_obj.create_parent_by_fields(
field_dict, 'P')
secondary_parent_obj = Parent()
sp_status = secondary_parent_obj.create_parent_by_fields(
field_dict, 'S')
print "creating sessions"
session_obj = Session()
session_obj.parse_fields(field_dict)
session_obj.save()
print "creating cadet profile"
try:
lookup = {
'full_name': field_dict.get('Participant: Name', '')}
cadet_obj = Cadet.objects.get(**lookup)
except Cadet.DoesNotExist:
cadet_obj = Cadet()
cd_status = cadet_obj.parse_fields(field_dict)
if cd_status:
if pp_status:
cadet_obj.primary_parent = primary_parent_obj
if sp_status:
cadet_obj.secondary_parent = secondary_parent_obj
else:
cadet_obj.secondary_parent = primary_parent_obj
cadet_obj.sessions = session_obj
cadet_obj.save()
msg['count'] += 1
except Exception as ex:
msg['status'] = 'FAILED'
msg['Error'].append(repr(ex))
def handle_uploaded_file(f, msg):
msg = dict(status='UNKNOWN', Error=[]) if not msg else msg
try:
with open(settings.MEDIA_ROOT + 'files_library/xmcamp.xlsx', 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
except Exception as ex:
msg['status'] = 'FAILED'
msg['Error'].append(repr(ex))
@transaction.non_atomic_requests
def get_latest_payments(msg=None):
msg = dict(status='UNKNOWN', Error=[]) if not msg else msg
try:
payment_fields = ['currency', 'email', 'financial_status', 'name',
'processed_at', 'total_price']
api_url = "https://c1e818528004ca3447c62364cd6e349f:ed1299635a02858e391fc2b0d194ff43@xmccamppx.myshopify.com/admin/orders.json"
response = requests.get(api_url)
response_json = response.json()
for order in response_json.get('orders', []):
parent_qs = Parent.objects.filter(email_address=order['email'])
if parent_qs.count() > 0:
if not order['financial_status'].lower() == 'paid':
continue
                if order['product_id'] not in ('692530369', 692530369):
continue
order_name = None
funds_obj = None
remaining_amount = 0.0
parent_obj = parent_qs[0]
try:
lookup = {'parent': parent_obj, 'is_active': True}
funds_obj = Funds.objects.get(**lookup)
funds_obj.is_active = False
remaining_amount = funds_obj.remaining_amount
order_name = funds_obj.name
except Funds.DoesNotExist:
pass
if order_name == order['name']:
continue
if funds_obj:
funds_obj.save()
funds_obj = Funds()
funds_obj.parent = parent_obj
                funds_obj.amount = float(order['total_price']) - 1.25 + remaining_amount
                funds_obj.remaining_amount = float(order['total_price']) - 1.25 + remaining_amount
funds_obj.currency = order['currency']
funds_obj.name = order['name']
funds_obj.recieved_time = order['processed_at']
funds_obj.save()
except Parent.DoesNotExist as ex:
msg['status'] = 'FAILED'
msg['Error'].append(repr(ex))
except Exception as ex:
msg['status'] = 'FAILED'
msg['Error'].append(repr(ex))
|
|
#!/usr/bin/env python
# Copyright 2015 Netherlands eScience Center <info@esciencecenter.nl>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from corpora.scikit import ScikitLda
import os
import numpy
from sklearn.cluster import KMeans, DBSCAN
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.manifold import MDS, TSNE
import gensim
from numpy import argsort
from wordcloud import WordCloud
import math
import argparse
class clustering:
def __init__(self, dirname, dictionary):
# initialize object
self.dirname = dirname
self.dictionary = dictionary
self.load_topics(self.dirname)
self.dic = gensim.corpora.Dictionary.load(self.dictionary)
self.find_distance_matrix(metric='cosine')
self.angularize() # angularize distance matrix
self.pos = self.data_embedding(type='TSNE')
# self.main()
def return_n_words(self, dic, topic, n_words):
'''
return the top n words in the topic
'''
aa = [(dic[idx], topic[idx]) for idx in argsort(topic)[-n_words:]]
return dict(aa)
def create_scatter(self, size=100, filename=None):
'''
create scatter plot of the clusters found
'''
        num_k = len(set(self.k_fit))  # number of clusters
plt.figure(figsize=(15, 15))
colors = cm.nipy_spectral(numpy.linspace(0, 1, num_k))
for idx in range(0, num_k):
plt.scatter(self.pos[numpy.where(self.k_fit == idx), 0], self.pos[numpy.where(self.k_fit == idx), 1],
s=100, label=str(idx), c=colors[idx])
plt.legend()
if filename == None:
plt.show()
else:
plt.savefig(filename, dpi=300)
plt.close()
def create_wordcloud(self, filename=None):
'''
create a wordcloud of the top words in a cluster
'''
plt.figure()
for idx, topic in enumerate(self.topic_weights):
wc = WordCloud(background_color="white")
ww = [(word, weight) for word, weight in topic.iteritems()]
img = wc.generate_from_frequencies(ww)
plt.subplot(len(self.topic_weights), 2, 2 * idx + 1)
plt.axis('off')
plt.imshow(img)
if filename == None:
plt.show()
else:
plt.savefig(filename, dpi=300)
plt.close()
def find_topics_per_cluster(self, topics, k_fit, cluster):
'''
return the topics in a given cluster
'''
        num_k = len(set(k_fit))  # number of clusters
cluster_indices = [numpy.where(k_fit == n) for n in range(0, num_k)]
topic_out = [topics[n] for n in cluster_indices[cluster][0]]
return topic_out
def load_topics(self, dirname):
self.topics = []
for subdir in [x[0] for x in os.walk(dirname)][1:]:
for file in os.listdir(subdir):
if file.endswith('pkl'):
print("attempting... ", file)
lda = ScikitLda.load(subdir + "/" + file)
for topic in lda.topics:
self.topics.append(topic / topic.sum())
def find_distance_matrix(self, metric='cosine'):
'''
        compute distance matrix between topics using cosine or euclidean
distance (default=cosine distance)
'''
if metric == 'cosine':
self.distance_matrix = pairwise_distances(self.topics,
metric='cosine')
# diagonals should be exactly zero, so remove rounding errors
numpy.fill_diagonal(self.distance_matrix, 0)
if metric == 'euclidean':
self.distance_matrix = pairwise_distances(self.topics,
metric='euclidean')
def data_embedding(self, type='TSNE'):
'''
        Fit the distance matrix into a two-dimensional embedding space using
the TSNE or MDS model
'''
if type == 'TSNE':
model = TSNE(n_components=2, metric='precomputed')
if type == 'MDS':
model = MDS(n_components=2, max_iter=3000, eps=1e-9,
dissimilarity="precomputed", n_jobs=1)
# position of points in embedding space
pos = model.fit(self.distance_matrix).embedding_
return pos
def explained_variance(self, nclusters, filename='elbow.pdf'):
'''
        calculate explained variance for a range of cluster counts
defined by 1:nclusters
plot elbow curve of explained variance against number of clusters
'''
from scipy.spatial.distance import cdist, pdist
KK = range(1, nclusters + 1)
centers = [KMeans(n_clusters=k).fit(
self.distance_matrix).cluster_centers_ for k in KK]
D_k = [cdist(self.distance_matrix, cent, 'euclidean')
for cent in centers]
dist = [numpy.min(D, axis=1) for D in D_k]
# Total within-cluster sum of squares
tot_withinss = [sum(d**2) for d in dist]
# The total sum of squares
totss = sum(pdist(self.distance_matrix)**2) / \
self.distance_matrix.shape[0]
# The between-cluster sum of squares
betweenss = totss - tot_withinss
# elbow curve
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(KK, betweenss / totss * 100, 'b*-')
ax.set_ylim((0, 100))
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Percentage of variance explained (%)')
plt.title('Elbow for KMeans clustering')
if filename == None:
plt.show()
else:
plt.savefig(filename, dpi=300)
plt.close()
def computeAngle(self, x):
'''
        convert a cosine distance in [0, 1] into a normalized angular distance in [0, 1]
'''
result = math.fabs(2 * math.acos(1 - math.fabs(x)) / math.pi)
return result
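    # computeAngle maps a cosine distance in [0, 1] to a normalized angular
    # distance in [0, 1]. Illustrative values:
    #   computeAngle(0.0) -> 0.0    (identical topics)
    #   computeAngle(0.5) -> 2/3    (acos(0.5) = pi/3)
    #   computeAngle(1.0) -> 1.0    (orthogonal topics)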
def angularize(self):
'''
angularize distance matrix
'''
for i in range(len(self.distance_matrix)):
for j in range(len(self.distance_matrix[0])):
v = self.computeAngle(self.distance_matrix[i][j])
self.distance_matrix[i][j] = v
def kmeans(self, n_clusters):
'''
run KMeans clustering algorithm with n_clusters number of clusters
'''
        self.k_fit = KMeans(n_clusters=n_clusters,
                            precompute_distances=True).fit_predict(
            self.distance_matrix)
self.remap()
def dbscan(self, min_samples=3, eps=0.56):
'''
run DBSCAN clustering algorithm
'''
self.k_fit = DBSCAN(min_samples=min_samples, eps=eps,
metric='precomputed').fit_predict(
self.distance_matrix)
self.remap()
def remap(self):
'''
remap cluster -1 to max cluster + 1
'''
self.k_fit[self.k_fit == -1] = max(self.k_fit) + 1
def find_largest_cluster(self):
'''
return the largest cluster
'''
return max(set(self.k_fit), key=list(self.k_fit).count)
    def topic_weights(self, cluster, nwords=25):
        '''
        find topic_weights for the given cluster
        '''
        topic_out = self.find_topics_per_cluster(
            self.topics, self.k_fit, cluster)
        # note: this attribute assignment shadows the method after the first call
        self.topic_weights = [self.return_n_words(self.dic, topic_out[idx], nwords)
                              for idx in range(0, len(topic_out))]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Topic model clustering')
parser.add_argument('--dirname', help='Directory with...')
parser.add_argument('--dictionary', help='gensim dictionary')
parser.add_argument('--cluster', type=str, help='clustering type')
parser.add_argument('--matrix', type=str, default='cosine',
help='distance matrix type [default=cosine]')
args = parser.parse_args()
# initialize object
cluster = clustering(args.dirname, args.dictionary)
# calculated explained variance for a range of clusters 1:nclusters
# (plots elbow curve)
cluster.explained_variance(nclusters=10, filename='elbow.pdf')
# cluster.kmeans(10)
cluster.dbscan(min_samples=3, eps=0.56)
# find largest cluster
largest_cluster = cluster.find_largest_cluster()
# create output plots
cluster.topic_weights(largest_cluster)
cluster.create_wordcloud()
cluster.create_scatter()
|
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Backend to retrieve the video streams from Shoutcast TV
# Copyright 2007, Frank Scholz <coherence@beebits.net>
# Copyright 2008,2009 Jean-Michel Sizun <jmDOTsizunATfreeDOTfr>
from twisted.internet import defer, reactor
from twisted.web import server
from coherence.upnp.core import utils
from coherence.upnp.core import DIDLLite
from coherence.extern.simple_plugin import Plugin
from coherence import log
from coherence.backend import BackendItem, BackendStore
import zlib
DEFAULT_NAME = 'iTV'
ROOT_CONTAINER_ID = 0
SHOUTCAST_WS_URL = 'http://www.shoutcast.com/sbin/newtvlister.phtml?service=winamp2&no_compress=1'
SHOUTCAST_TUNEIN_URL = 'http://www.shoutcast.com/sbin/tunein-tvstation.pls?id=%s'
VIDEO_MIMETYPE = 'video/x-nsv'
class ProxyStream(utils.ReverseProxyUriResource, log.Loggable):
logCategory = 'itv'
stream_url = None
def __init__(self, uri):
log.Loggable.__init__(self)
self.stream_url = None
utils.ReverseProxyUriResource.__init__(self, uri)
def requestFinished(self, result):
""" self.connection is set in utils.ReverseProxyResource.render """
self.info("ProxyStream requestFinished")
if self.connection is not None:
self.connection.transport.loseConnection()
def render(self, request):
if self.stream_url is None:
def got_playlist(result):
if result is None:
                    self.warning('Failed to retrieve playlist - nothing retrieved')
                    return self.requestFinished(result)
result = result[0].split('\n')
for line in result:
if line.startswith('File1='):
self.stream_url = line[6:].split(";")[0]
break
#print "stream URL:", self.stream_url
if self.stream_url is None:
                    self.warning('Failed to retrieve playlist - inconsistent playlist file')
                    return self.requestFinished(result)
#self.resetUri(self.stream_url)
request.uri = self.stream_url
return self.render(request)
def got_error(error):
self.warning(error)
return None
playlist_url = self.uri
#print "playlist URL:", playlist_url
d = utils.getPage(playlist_url, timeout=20)
d.addCallbacks(got_playlist, got_error)
return server.NOT_DONE_YET
if request.clientproto == 'HTTP/1.1':
self.connection = request.getHeader('connection')
if self.connection:
tokens = map(str.lower, self.connection.split(' '))
if 'close' in tokens:
d = request.notifyFinish()
d.addBoth(self.requestFinished)
else:
d = request.notifyFinish()
d.addBoth(self.requestFinished)
return utils.ReverseProxyUriResource.render(self, request)
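    # got_playlist above parses a Shoutcast tune-in .pls playlist. A typical
    # (hypothetical) payload:
    #   [playlist]
    #   numberofentries=1
    #   File1=http://203.0.113.7:8000;stream.nsv
    #   Title1=Example TV
    #   Version=2
    # Only the first 'File1=' line is used, and anything after ';' is dropped.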
class Container(BackendItem):
def __init__(self, id, store, parent_id, title):
BackendItem.__init__(self)
self.url = store.urlbase + str(id)
self.parent_id = parent_id
self.id = id
self.name = title
self.mimetype = 'directory'
self.update_id = 0
self.children = []
self.store = store
self.item = DIDLLite.Container(self.id, self.parent_id, self.name)
self.item.childCount = 0
self.sorted = False
def add_child(self, child):
id = child.id
if isinstance(child.id, basestring):
_, id = child.id.split('.')
if self.children is None:
self.children = []
self.children.append(child)
self.item.childCount += 1
self.sorted = False
def get_children(self, start=0, end=0):
if self.sorted == False:
def childs_sort(x, y):
r = cmp(x.name, y.name)
return r
self.children.sort(cmp=childs_sort)
self.sorted = True
if end != 0:
return self.children[start:end]
return self.children[start:]
def get_child_count(self):
if self.children is None:
return 0
return len(self.children)
def get_path(self):
return self.url
def get_item(self):
return self.item
def get_name(self):
return self.name
def get_id(self):
return self.id
class ITVItem(BackendItem):
logCategory = 'itv'
def __init__(self, store, id, obj, parent):
BackendItem.__init__(self)
self.parent = parent
self.id = id
self.name = obj.get('name')
self.mimetype = obj.get('mimetype')
self.description = None
self.date = None
self.item = None
self.duration = None
self.store = store
self.url = self.store.urlbase + str(self.id)
self.stream_url = obj.get('url')
self.location = ProxyStream(self.stream_url)
def get_item(self):
if self.item == None:
self.item = DIDLLite.VideoItem(self.id, self.parent.id, self.name)
self.item.description = self.description
self.item.date = self.date
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
res.duration = self.duration
#res.size = 0 #None
self.item.res.append(res)
return self.item
def get_path(self):
return self.url
class ITVStore(BackendStore):
logCategory = 'itv'
implements = ['MediaServer']
    description = ('Shoutcast TV', 'exposes the list of video streams from Shoutcast TV.', None)
options = [{'option': 'name', 'text': 'Server Name:', 'type': 'string', 'default': 'my media', 'help': 'the name under this MediaServer shall show up with on other UPnP clients'},
{'option': 'version', 'text': 'UPnP Version:', 'type': 'int', 'default': 2, 'enum': (2, 1), 'help': 'the highest UPnP version this MediaServer shall support', 'level': 'advance'},
{'option': 'uuid', 'text': 'UUID Identifier:', 'type': 'string', 'help': 'the unique (UPnP) identifier for this MediaServer, usually automatically set', 'level': 'advance'},
{'option': 'genrelist', 'text': 'Server URL', 'type': 'string', 'default': SHOUTCAST_WS_URL}
]
def __init__(self, server, **kwargs):
BackendStore.__init__(self, server, **kwargs)
self.next_id = 1000
self.config = kwargs
self.name = kwargs.get('name', DEFAULT_NAME)
self.update_id = 0
self.store = {}
self.wmc_mapping = {'4': 1000}
self.shoutcast_ws_url = self.config.get('genrelist', SHOUTCAST_WS_URL)
self.init_completed()
def __repr__(self):
return self.__class__.__name__
def storeItem(self, parent, item, id):
self.store[id] = item
parent.add_child(item)
def appendGenre(self, genre, parent):
id = self.getnextID()
item = Container(id, self, -1, genre)
self.storeItem(parent, item, id)
return item
def appendFeed(self, obj, parent):
id = self.getnextID()
item = ITVItem(self, id, obj, parent)
self.storeItem(parent, item, id)
return item
def len(self):
return len(self.store)
def get_by_id(self, id):
if isinstance(id, basestring):
id = id.split('@', 1)
id = id[0]
try:
return self.store[int(id)]
except (ValueError, KeyError):
pass
return None
def getnextID(self):
ret = self.next_id
self.next_id += 1
return ret
def upnp_init(self):
self.current_connection_id = None
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
['http-get:*:%s:*' % VIDEO_MIMETYPE,
],
default=True)
rootItem = Container(ROOT_CONTAINER_ID, self, -1, self.name)
self.store[ROOT_CONTAINER_ID] = rootItem
self.retrieveList_attemptCount = 0
self.retrieveList(rootItem)
def retrieveList(self, parent):
self.info("Retrieving Shoutcast TV listing...")
def got_page(result):
if self.retrieveList_attemptCount == 0:
self.info("Connection to ShoutCast service successful for TV listing")
else:
self.warning("Connection to ShoutCast service successful for TV listing after %d attempts.", self.retrieveList_attemptCount)
result = result[0]
result = utils.parse_xml(result, encoding='utf-8')
genres = []
stations = {}
for stationResult in result.findall('station'):
mimetype = VIDEO_MIMETYPE
station_id = stationResult.get('id')
bitrate = stationResult.get('br')
rating = stationResult.get('rt')
name = stationResult.get('name').encode('utf-8')
genre = stationResult.get('genre')
url = SHOUTCAST_TUNEIN_URL % (station_id)
if genres.count(genre) == 0:
genres.append(genre)
sameStation = stations.get(name)
if sameStation == None or bitrate > sameStation['bitrate']:
station = {'name': name,
'station_id': station_id,
'mimetype': mimetype,
'id': station_id,
'url': url,
'bitrate': bitrate,
'rating': rating,
'genre': genre}
stations[name] = station
genreItems = {}
for genre in genres:
genreItem = self.appendGenre(genre, parent)
genreItems[genre] = genreItem
for station in stations.values():
genre = station.get('genre')
parentItem = genreItems[genre]
self.appendFeed({'name': station.get('name'),
'mimetype': station['mimetype'],
'id': station.get('station_id'),
'url': station.get('url')},
parentItem)
def got_error(error):
self.warning("Connection to ShoutCast service failed. Will retry in 5s!")
self.debug("%r", error.getTraceback())
# will retry later
self.retrieveList_attemptCount += 1
reactor.callLater(5, self.retrieveList, parent=parent)
d = utils.getPage(self.shoutcast_ws_url)
d.addCallbacks(got_page, got_error)
|
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/edit -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_edit
short_description: Modify and idempotently manage openshift objects.
description:
- Modify openshift objects programmatically.
options:
state:
description:
- Currently 'present' is the only supported state.
required: true
default: present
choices: ["present"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: true
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
kind:
description:
- The kind attribute of the object.
required: True
default: None
choices:
- bc
- buildconfig
- configmaps
- dc
- deploymentconfig
- imagestream
- imagestreamtag
- is
- istag
- namespace
- project
- projects
- node
- ns
- persistentvolume
- pv
- rc
- replicationcontroller
- routes
- scc
- secret
- securitycontextconstraints
- service
- svc
aliases: []
file_name:
description:
- The name of the file to edit
required: false
default: None
aliases: []
file_format:
description:
- The format of the file being edited.
required: false
default: yaml
aliases: []
content:
description:
- Content of the file
required: true
default: None
aliases: []
force:
description:
- Whether or not to force the operation
required: false
default: None
aliases: []
separator:
description:
- The separator format for the edit.
required: false
default: '.'
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
oc_edit:
kind: rc
name: hawkular-cassandra-rc
namespace: openshift-infra
content:
spec.template.spec.containers[0].resources.limits.memory: 512
spec.template.spec.containers[0].resources.requests.memory: 256
'''
# -*- -*- -*- End included fragment: doc/edit -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
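# Illustrative example (added annotation, not in the original module):
# parse_key() yields (array_index, dict_key) tuples, e.g.
#   Yedit.parse_key('a.b[0].c') -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]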
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
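# Illustrative example (added annotation, not in the original module):
# add_entry() creates intermediate dicts as needed:
#   d = {}
#   Yedit.add_entry(d, 'a.b.c', 'value')  # d becomes {'a': {'b': {'c': 'value'}}}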
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
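# Illustrative example (added annotation, not in the original module); each
# edit is a dict where only 'key' and 'value' are required:
#   edits = [{'key': 'a.b.c', 'value': 'd'}]
#   Yedit.process_edits(edits, Yedit(content={}))
#   # -> {'changed': True, 'results': [{'key': 'a.b.c', 'edit': {'a': {'b': {'c': 'd'}}}}]}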
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
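# Illustrative example (added annotation, not in the original module):
# params={'NAME': 'myapp', 'TAG': 'v1'} is rendered as
# ['-v', 'NAME=myapp', 'TAG=v1'] and appended to the 'oc process' command.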
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get the openshift version in OpenShift 3.2:
# by default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
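# Illustrative example (added annotation, not in the original module):
#   Utils.add_custom_versions({'oc': 'v3.3.0.33'})
#   # -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}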
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
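# Illustrative example (added annotation, not in the original module): given
# options such as {'replicas': {'value': 3, 'include': True}}, stringify()
# yields ['--replicas=3']; underscores in option names become dashes.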
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_edit.py -*- -*- -*-
class Edit(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
kind,
namespace,
resource_name=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
separator='.',
verbose=False):
''' Constructor for OpenshiftOC '''
super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
self.separator = separator
def get(self):
'''return a secret by name '''
return self._get(self.kind, self.name)
def update(self, file_name, content, force=False, content_type='yaml'):
'''run update '''
if file_name:
if content_type == 'yaml':
data = yaml.safe_load(open(file_name))
elif content_type == 'json':
data = json.loads(open(file_name).read())
changes = []
yed = Yedit(filename=file_name, content=data, separator=self.separator)
for key, value in content.items():
changes.append(yed.put(key, value))
if any([not change[0] for change in changes]):
return {'returncode': 0, 'updated': False}
yed.write()
atexit.register(Utils.cleanup, [file_name])
return self._replace(file_name, force=force)
return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
@staticmethod
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocedit = Edit(params['kind'],
params['namespace'],
params['name'],
kubeconfig=params['kubeconfig'],
separator=params['separator'],
verbose=params['debug'])
api_rval = ocedit.get()
########
# Create
########
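# oc edit cannot create objects, so fail when the target does not exist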
if not Utils.exists(api_rval['results'], params['name']):
return {"failed": True, 'msg': api_rval}
########
# Update
########
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
api_rval = ocedit.update(params['file_name'],
params['content'],
params['force'],
params['file_format'])
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
if 'updated' in api_rval and not api_rval['updated']:
return {"changed": False, 'results': api_rval, 'state': 'present'}
# return the created object
api_rval = ocedit.get()
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
return {"changed": True, 'results': api_rval, 'state': 'present'}
# -*- -*- -*- End included fragment: class/oc_edit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_edit.py -*- -*- -*-
def main():
'''
ansible oc module for editing objects
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kind=dict(required=True,
type='str',
choices=['dc', 'deploymentconfig',
'rc', 'replicationcontroller',
'svc', 'service',
'scc', 'securitycontextconstraints',
'ns', 'namespace', 'project', 'projects',
'is', 'imagestream',
'istag', 'imagestreamtag',
'bc', 'buildconfig',
'routes',
'node',
'secret',
'pv', 'persistentvolume']),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
force=dict(default=False, type='bool'),
separator=dict(default='.', type='str'),
),
supports_check_mode=True,
)
rval = Edit.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_edit.py -*- -*- -*-
|
|
import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import keep_lazy_text
from django.utils.regex_helper import _lazy_re_compile
# based on RFC 7232, Appendix C
ETAG_MATCH = _lazy_re_compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = _lazy_re_compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = _lazy_re_compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = _lazy_re_compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = _lazy_re_compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote() is deprecated in favor of '
'urllib.parse.quote().',
RemovedInDjango40Warning, stacklevel=2,
)
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote_plus() is deprecated in favor of '
'urllib.parse.quote_plus().',
RemovedInDjango40Warning, stacklevel=2,
)
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote() is deprecated in favor of '
'urllib.parse.unquote().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote_plus() is deprecated in favor of '
'urllib.parse.unquote_plus().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
"Cannot encode None for key '%s' in a query string. Did you "
"mean to pass an empty string or omit the value?" % key
)
elif not doseq or isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
"Cannot encode None for key '%s' in a query "
"string. Did you mean to pass an empty string or "
"omit the value?" % key
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
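# Illustrative example (added annotation, not part of the original module):
#   urlencode({'a': 1, 'b': [2, 3]}, doseq=True) -> 'a=1&b=2&b=3'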
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
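# Illustrative example (added annotation, not part of the original module):
#   http_date(0) -> 'Thu, 01 Jan 1970 00:00:00 GMT'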
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m['year'])
if year < 100:
current_year = datetime.datetime.utcnow().year
current_century = current_year - (current_year % 100)
if year - (current_year % 100) > 50:
# years that appear to be more than 50 years in the future are
# interpreted as representing the past.
year += current_century - 100
else:
year += current_century
month = MONTHS.index(m['mon'].lower()) + 1
day = int(m['day'])
hour = int(m['hour'])
min = int(m['min'])
sec = int(m['sec'])
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
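# Illustrative examples (added annotation, not part of the original module):
#   int_to_base36(36) -> '10'
#   base36_to_int('zz') -> 1295  # 35 * 36 + 35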
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match[1] for match in etag_matches if match]
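# Illustrative example (added annotation, not part of the original module):
#   parse_etags('W/"abc", "def"') -> ['W/"abc"', '"def"']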
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
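# Illustrative examples (added annotation, not part of the original module):
#   is_same_domain('foo.example.com', '.example.com') -> True
#   is_same_domain('example.com', '.example.com') -> True
#   is_same_domain('badexample.com', 'example.com') -> False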
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url uses an allowed host and a safe scheme.
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()
on the path component of untrusted URLs.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (
_url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=require_https) and
_url_has_allowed_host_and_scheme(url.replace('\\', '/'), allowed_hosts, require_https=require_https)
)
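# Illustrative examples (added annotation, not part of the original module):
#   url_has_allowed_host_and_scheme('/next', {'example.com'}) -> True
#   url_has_allowed_host_and_scheme('http://evil.com', {'example.com'}) -> False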
def is_safe_url(url, allowed_hosts, require_https=False):
warnings.warn(
'django.utils.http.is_safe_url() is deprecated in favor of '
'url_has_allowed_host_and_scheme().',
RemovedInDjango40Warning, stacklevel=2,
)
return url_has_allowed_host_and_scheme(url, allowed_hosts, require_https)
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if nv[1] or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith('//'):
url = '/%2F{}'.format(url[2:])
return url
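# Illustrative example (added annotation, not part of the original module):
#   escape_leading_slashes('//evil.example.com') -> '/%2Fevil.example.com'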
|
|
#
# Copyright (C) 2016 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
import time
import pyuavcan_v0
import logging
import queue
from PyQt5.QtWidgets import QWidget, QDialog, QPlainTextEdit, QSpinBox, QHBoxLayout, QVBoxLayout, QComboBox, \
QCompleter, QLabel
from PyQt5.QtCore import Qt, QTimer
from . import CommitableComboBoxWithHistory, make_icon_button, get_monospace_font, show_error, FilterBar
logger = logging.getLogger(__name__)
class QuantityDisplay(QWidget):
def __init__(self, parent, quantity_name, units_of_measurement):
super(QuantityDisplay, self).__init__(parent)
self._label = QLabel('?', self)
layout = QHBoxLayout(self)
layout.addStretch(1)
layout.addWidget(QLabel(quantity_name, self))
layout.addWidget(self._label)
layout.addWidget(QLabel(units_of_measurement, self))
layout.addStretch(1)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
def set(self, value):
self._label.setText(str(value))
class RateEstimator:
def __init__(self, update_interval=0.5, averaging_period=4):
self._update_interval = update_interval
self._estimate_lifetime = update_interval * averaging_period
self._averaging_period = averaging_period
self._hist = []
self._checkpoint_ts = 0
self._events_since_checkpoint = 0
self._estimate_expires_at = time.monotonic()
def register_event(self, timestamp):
self._events_since_checkpoint += 1
dt = timestamp - self._checkpoint_ts
if dt >= self._update_interval:
# Resetting the stat if expired
mono_ts = time.monotonic()
expired = mono_ts > self._estimate_expires_at
self._estimate_expires_at = mono_ts + self._estimate_lifetime
if expired:
self._hist = []
elif len(self._hist) >= self._averaging_period:
self._hist.pop()
# Updating the history
self._hist.insert(0, self._events_since_checkpoint / dt)
self._checkpoint_ts = timestamp
self._events_since_checkpoint = 0
def get_rate_with_timestamp(self):
if time.monotonic() <= self._estimate_expires_at:
return (sum(self._hist) / len(self._hist)), self._checkpoint_ts
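# Added annotation: RateEstimator keeps up to `averaging_period` per-interval
# rates in self._hist and reports their mean; the estimate expires after
# update_interval * averaging_period seconds without updates, after which
# get_rate_with_timestamp() implicitly returns None.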
class SubscriberWindow(QDialog):
WINDOW_NAME_PREFIX = 'Subscriber'
def __init__(self, parent, node, active_data_type_detector):
super(SubscriberWindow, self).__init__(parent)
self.setWindowTitle(self.WINDOW_NAME_PREFIX)
self.setAttribute(Qt.WA_DeleteOnClose) # This is required to stop background timers!
self._node = node
self._active_data_type_detector = active_data_type_detector
self._active_data_type_detector.message_types_updated.connect(self._update_data_type_list)
self._message_queue = queue.Queue()
self._subscriber_handle = None
self._update_timer = QTimer(self)
self._update_timer.setSingleShot(False)
self._update_timer.timeout.connect(self._do_redraw)
self._update_timer.start(100)
self._log_viewer = QPlainTextEdit(self)
self._log_viewer.setReadOnly(True)
self._log_viewer.setLineWrapMode(QPlainTextEdit.NoWrap)
self._log_viewer.setFont(get_monospace_font())
self._log_viewer.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
try:
self._log_viewer.setPlaceholderText('Received messages will be printed here in YAML format')
except AttributeError: # Old PyQt
pass
self._num_rows_spinbox = QSpinBox(self)
self._num_rows_spinbox.setToolTip('Number of rows to display; a large number will impair performance')
self._num_rows_spinbox.valueChanged.connect(
lambda: self._log_viewer.setMaximumBlockCount(self._num_rows_spinbox.value()))
self._num_rows_spinbox.setMinimum(1)
self._num_rows_spinbox.setMaximum(1000000)
self._num_rows_spinbox.setValue(100)
self._num_errors = 0
self._num_messages_total = 0
self._num_messages_past_filter = 0
self._msgs_per_sec_estimator = RateEstimator()
self._num_messages_total_label = QuantityDisplay(self, 'Total', 'msgs')
self._num_messages_past_filter_label = QuantityDisplay(self, 'Accepted', 'msgs')
self._msgs_per_sec_label = QuantityDisplay(self, 'Accepting', 'msg/sec')
self._type_selector = CommitableComboBoxWithHistory(self)
self._type_selector.setToolTip('Name of the message type to subscribe to')
self._type_selector.setInsertPolicy(QComboBox.NoInsert)
completer = QCompleter(self._type_selector)
completer.setCaseSensitivity(Qt.CaseSensitive)
completer.setModel(self._type_selector.model())
self._type_selector.setCompleter(completer)
self._type_selector.on_commit = self._do_start
self._type_selector.setFont(get_monospace_font())
self._type_selector.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self._type_selector.setFocus(Qt.OtherFocusReason)
self._active_filter = None
self._filter_bar = FilterBar(self)
self._filter_bar.on_filter = self._install_filter
self._start_stop_button = make_icon_button('video-camera', 'Begin subscription', self, checkable=True,
on_clicked=self._toggle_start_stop)
        self._pause_button = make_icon_button('pause', 'Pause updates; non-displayed messages will be queued in memory',
self, checkable=True)
self._clear_button = make_icon_button('trash-o', 'Clear output and reset stat counters', self,
on_clicked=self._do_clear)
self._show_all_message_types = make_icon_button('puzzle-piece',
'Show all known message types, not only those that are '
'currently being exchanged over the bus',
self, checkable=True, on_clicked=self._update_data_type_list)
layout = QVBoxLayout(self)
controls_layout = QHBoxLayout(self)
controls_layout.addWidget(self._start_stop_button)
controls_layout.addWidget(self._pause_button)
controls_layout.addWidget(self._clear_button)
controls_layout.addWidget(self._filter_bar.add_filter_button)
controls_layout.addWidget(self._show_all_message_types)
controls_layout.addWidget(self._type_selector, 1)
controls_layout.addWidget(self._num_rows_spinbox)
layout.addLayout(controls_layout)
layout.addWidget(self._filter_bar)
layout.addWidget(self._log_viewer, 1)
stats_layout = QHBoxLayout(self)
stats_layout.addWidget(self._num_messages_total_label)
stats_layout.addWidget(self._num_messages_past_filter_label)
stats_layout.addWidget(self._msgs_per_sec_label)
layout.addLayout(stats_layout)
self.setLayout(layout)
# Initial updates
self._update_data_type_list()
def _install_filter(self, f):
self._active_filter = f
def _apply_filter(self, yaml_message):
"""This function will throw if the filter expression is malformed!"""
if self._active_filter is None:
return True
return self._active_filter.match(yaml_message)
def _on_message(self, e):
# Global statistics
self._num_messages_total += 1
# Rendering and filtering
try:
text = pyuavcan_v0.to_yaml(e)
if not self._apply_filter(text):
return
except Exception as ex:
self._num_errors += 1
text = '!!! [%d] MESSAGE PROCESSING FAILED: %s' % (self._num_errors, ex)
else:
self._num_messages_past_filter += 1
self._msgs_per_sec_estimator.register_event(e.transfer.ts_monotonic)
# Sending the text for later rendering
try:
self._message_queue.put_nowait(text)
except queue.Full:
pass
def _toggle_start_stop(self):
try:
if self._subscriber_handle is None:
self._do_start()
else:
self._do_stop()
finally:
self._start_stop_button.setChecked(self._subscriber_handle is not None)
def _do_stop(self):
if self._subscriber_handle is not None:
self._subscriber_handle.remove()
self._subscriber_handle = None
self._pause_button.setChecked(False)
self.setWindowTitle(self.WINDOW_NAME_PREFIX)
def _do_start(self):
self._do_stop()
self._do_clear()
try:
selected_type = self._type_selector.currentText().strip()
if not selected_type:
return
data_type = pyuavcan_v0.TYPENAMES[selected_type]
except Exception as ex:
show_error('Subscription error', 'Could not load requested data type', ex, self)
return
try:
self._subscriber_handle = self._node.add_handler(data_type, self._on_message)
except Exception as ex:
show_error('Subscription error', 'Could not create requested subscription', ex, self)
return
self.setWindowTitle('%s [%s]' % (self.WINDOW_NAME_PREFIX, selected_type))
self._start_stop_button.setChecked(True)
def _do_redraw(self):
self._num_messages_total_label.set(self._num_messages_total)
self._num_messages_past_filter_label.set(self._num_messages_past_filter)
estimated_rate = self._msgs_per_sec_estimator.get_rate_with_timestamp()
self._msgs_per_sec_label.set('N/A' if estimated_rate is None else ('%.0f' % estimated_rate[0]))
if self._pause_button.isChecked():
return
self._log_viewer.setUpdatesEnabled(False)
while True:
try:
text = self._message_queue.get_nowait()
except queue.Empty:
break
else:
self._log_viewer.appendPlainText(text + '\n')
self._log_viewer.setUpdatesEnabled(True)
def _update_data_type_list(self):
logger.info('Updating data type list')
if self._show_all_message_types.isChecked():
items = self._active_data_type_detector.get_names_of_all_message_types_with_data_type_id()
else:
items = self._active_data_type_detector.get_names_of_active_messages()
self._type_selector.clear()
self._type_selector.addItems(items)
def _do_clear(self):
self._num_messages_total = 0
self._num_messages_past_filter = 0
self._do_redraw()
self._log_viewer.clear()
    def closeEvent(self, qcloseevent):
        try:
            self._do_stop()     # The subscriber handle exposes remove(), not close()
        except Exception:
            pass
        super(SubscriberWindow, self).closeEvent(qcloseevent)
super(SubscriberWindow, self).closeEvent(qcloseevent)
@staticmethod
def spawn(parent, node, active_data_type_detector):
SubscriberWindow(parent, node, active_data_type_detector).show()
|
|
"""
Central control loop
====================
An implementation of the control loop.
"""
import json
import logging
log = logging.getLogger()
from . import atcommands as at
from . import navdata
from . import videopacket
class ConnectionError(Exception):
"""A class used to represent a connection error to the drone.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
class ControlLoop(object):
"""A control loop for the drone. Initialises navdata and video streams.
You must call the connect and disconnect methods on the control loop before
trying any control methods.
*host* is the IP address of the drone
*control_host* is the IP address to which decoded packets will be sent
*video_cb* is a callable which will be passed a sequence of bytes in
RGB565 (==RGB16) format for each video frame.
If non-None, navdata_cb is a callable which will be passed a block as
defined in ardrone.core.navdata (e.g. DmoBlock, VisionDetectBlock, etc) as
and when verified navdata packets arrive.
>>> from ..platform import dummy
>>> con = dummy.Connection()
>>> cl = ControlLoop(con)
"""
# Connection numbers
_AT = 1
_NAV = 2
_VID = 3
_CONTROL = 4
_CONFIG = 5
_CONTROL_DATA = 6
_VIDEO_DATA = 7
def __init__(self, connection,
video_cb=None, navdata_cb=None,
host='192.168.1.1', control_host='127.0.0.1',
at_port=5556, nav_port=5554, vid_port=5555, config_port=5559,
                 control_port=5560, control_data_port=5561, video_data_port=5562,
                 control_data_listening_port=3456, video_data_listening_port=3457,
                 bind_host=None):
self._connection = connection
self._reset_sequence()
self._vid_decoder = videopacket.Decoder(video_cb)
self._flying = False
self.navdata_cb = navdata_cb
# State for navdata
self._last_navdata_sequence = 0
# State for control
self._last_control_sequence = 0
self._last_control_state = {
'reset': False,
'take_off': False,
'roll': 0.0,
'pitch': 0.0,
'yaw': 0.0,
'gas': 0.0,
}
# Open the connections
self._connection.open(ControlLoop._AT, (host, at_port), (bind_host, at_port, None))
self._connection.open(ControlLoop._NAV, (host, nav_port), (bind_host, nav_port, self._got_navdata))
self._connection.open(ControlLoop._VID, (host, vid_port), (bind_host, vid_port, self._got_video))
self._connection.open(ControlLoop._CONTROL, (host, control_port), (None, control_port, self._got_control))
self._connection.open(ControlLoop._CONFIG, (host, config_port), (None, config_port, self._got_config))
self._connection.open(ControlLoop._CONTROL_DATA, (control_host, control_data_port), (None, control_data_listening_port, None))
self._connection.open(ControlLoop._VIDEO_DATA, (control_host, video_data_port), (None, video_data_listening_port, None))
self._config_current = None
self._config_to_send = []
self._config_ack_timeout = 0
def tick(self):
if self._config_ack_timeout > 0:
self._config_ack_timeout -= 1
def bootstrap(self):
"""Initialise all the drone data streams."""
log.info('Bootstrapping communication with the drone.')
self.reset()
self.flat_trim()
self.start_navdata()
self.start_video()
self.get_config()
def get_config(self):
"""Ask the drone for it's configuration."""
log.info('Requesting configuration from drone.')
self._send(at.ctrl(4))
def flat_trim(self):
r"""Send a take off command.
"""
self._send(at.ftrim())
def take_off(self):
r"""Send a take off command.
"""
        self._send(''.join([at.config('control:outdoor', False), at.ref(take_off=True)]))
def land(self):
r"""Send a land command.
"""
        self._send(at.ref(take_off=False, reset=False))
    def view_camera(self, channel):
        r"""Set the camera channel to stream from the drone.
        Forward facing camera = channel 0
        Downward facing camera = channel 1
        """
self._send(at.zap(channel))
def hover(self):
r"""Send a hover command.
"""
self._send(at.pcmd())
    def reset(self):
        r"""Send a reset command to the drone.
        """
        self._send(at.ref(reset=True))
def start_video(self):
self._connection.viddata_cb = self._vid_decoder.decode
self._connection.put(ControlLoop._VID, b'\x01\x00\x00\x00')
def start_navdata(self):
log.info('starting navdata streaming')
        log.debug('Navdata bootstrap stage 1')
# See Dev. guide 7.1.2 pp. 40
self._last_navdata_sequence = 0
self._connection.put(ControlLoop._NAV, b'\x01\x00\x00\x00')
self.send_config()
def _got_navdata(self, data):
ndh, packets = navdata.split(data)
if not ndh.valid():
log.error('Got invalid navdata packet')
return
if ndh.sequence == 1:
self._last_navdata_sequence = 0
# Check the packet sequence number
if ndh.sequence <= self._last_navdata_sequence:
log.error('Dropping out of sequence navdata packet: %s' % (ndh.sequence,))
return
# Record the sequence number
self._last_navdata_sequence = ndh.sequence
# Dev. guide pp. 40: lost communication
if (ndh.state & navdata.ARDRONE_COM_LOST_MASK) != 0:
log.warning('Lost connection, re-establishing navdata connection.')
self._last_navdata_sequence = 0
self.start_navdata()
return
# Dev. guide pp. 40: watchdog state
# this is a case where the sequence counter should be reset
if (ndh.state & navdata.ARDRONE_COM_WATCHDOG_MASK) != 0:
self._last_navdata_sequence = 0
self._connection.put(ControlLoop._AT, at.comwdg())
        # Is the ARDRONE_NAVDATA_BOOTSTRAP status bit set? (Dev. guide fig 7.1)
        if (ndh.state & navdata.ARDRONE_NAVDATA_BOOTSTRAP) != 0:
            if self._config_ack_timeout == 0:
                log.info('Navdata bootstrap')
if len(self._config_to_send) == 0:
self._config_current = ('general:navdata_demo', True)
self._config_ack_timeout = 30
self.send_config()
if (ndh.state & navdata.ARDRONE_COMMAND_MASK) != 0:
self._send(at.ctrl(5))
self._config_ack_timeout = 0
self._config_current = None
if len(self._config_to_send) > 0 and self._config_ack_timeout == 0 and self._config_current is None:
self._config_current = self._config_to_send[0]
self._config_to_send = self._config_to_send[1:]
if self._config_current is not None and self._config_ack_timeout == 0:
key, value = self._config_current
log.info('Sending: %s = %s' % (key, value))
self._send(at.config(key, value))
self._config_ack_timeout = 30
# Record flying state
self._flying = (ndh.state & navdata.ARDRONE_FLY_MASK) != 0
for packet in packets:
# Send a JSON encoded control packet to the controller
self._connection.put(ControlLoop._CONTROL_DATA, packet.json())
# Call the navdata callable if one is configured
if self.navdata_cb is not None:
self.navdata_cb(packet)
def send_config(self):
self._config_to_send = []
# self._config_to_send.append(('general:navdata_demo', True))
self._config_to_send.append(('general:navdata_demo', False)) # required for video detect
# self._config_to_send.append(('video:video_channel', 0))
#self._config_to_send.append(('video:video_bitrate_control_mode', '1')) # Dynamic
#self._config_to_send.append(('video:video_codec', '64'))
self._config_to_send.append(('general:vision_enable', True))
self._config_to_send.append(('detect:detect_type', 2))
self._config_to_send.append(('detect:enemy_colors', 2)) # orange-yellow-orange
self._config_to_send.append(('detect:detections_select_h', 1))
self._config_to_send.append(('detect:enemy_without_shell', False))
def _got_config(self, packet):
log.info('Got config len %i' % (len(packet),))
def _got_video(self, packet):
self._connection.put(ControlLoop._VIDEO_DATA, packet)
        self._vid_decoder.decode(packet)  # Comment this out when decoding is handled elsewhere
def _got_control(self, packet):
# log.debug('Got control packet: %r' % (packet,))
# Parse packet
data = json.loads(packet.decode())
# Reset sequence counter if we get a '1'
if data['seq'] == 1:
self._last_control_sequence = 0
# Check and update sequence counter
if data['seq'] <= self._last_control_sequence:
            log.warning('Dropping control packet with invalid sequence number: %i' % (data['seq'],))
return
self._last_control_sequence = data['seq']
# Extract control state
state = data['state']
#log.debug('Control state: %r' % (state,))
# Compare state to recorded state
if not self._last_control_state['reset'] and state['reset']:
self.bootstrap()
if not self._last_control_state['take_off'] and state['take_off']:
if self._flying:
self.land()
else:
self.take_off()
# Send the command state
self._send(at.pcmd(not state['hover'], False, state['roll'], state['pitch'], state['gas'], state['yaw']))
# Record this state
self._last_control_state = state
def _reset_sequence(self):
at.reset_sequence()
def _send(self, cmd):
log.debug('Sending: %r' % (cmd,))
self._connection.put(ControlLoop._AT, cmd)
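# Illustrative usage sketch only, building on the dummy connection shown in
# the class docstring; the call sequence mirrors the methods defined above.
def _example_flight(connection):
    cl = ControlLoop(connection)
    cl.bootstrap()   # reset, flat trim, then start navdata/video streams
    cl.take_off()
    cl.hover()       # neutral pcmd keeps the drone hovering
    cl.land()
    return cl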
|
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow helper magics.
Provides a %tensorflow_version line magic which allows the user to select which
version of TensorFlow will be loaded when they do 'import tensorflow as tf'.
"""
from __future__ import print_function
import collections
import os
import sys
import textwrap
import pkg_resources
# A map of tensorflow version to installed location. If the installed
# location is `None`, TensorflowMagics assumes that the package is
# available in sys.path by default.
#
# This list must correspond to the TensorFlow installations on the host Colab
# instance.
_VersionInfo = collections.namedtuple("_VersionInfo",
["name", "path", "version"])
_VERSIONS = {
"1": _VersionInfo("1.x", "/tensorflow-1.15.2", "1.15.2"),
"2": _VersionInfo("2.x", None, "2.8.0"),
}
_DEFAULT_VERSION = _VERSIONS["2"]
_INSTALLED_VERSION = _VERSIONS["2"]
def _get_python_path(version):
"""Gets the Python path entry for TensorFlow modules.
Args:
version: A _VersionInfo object representing a version of TF.
Returns:
A string suitable for inclusion in the `PYTHONPATH` environment
variable or in `sys.path`, or `None` if no path manipulation is
required to use the provided version of TensorFlow.
"""
if version.path is None:
return None
return os.path.join(
version.path, "python{}.{}".format(sys.version_info[0],
sys.version_info[1]))
def _get_os_path(version):
"""Gets the OS path entry for TensorFlow binaries.
Args:
version: A _VersionInfo object representing a version of TF.
Returns:
A string suitable for inclusion in the `PATH` environment variable,
or `None` if no path manipulation is required to use binaries from
the provided version of TensorFlow.
"""
python_path = _get_python_path(version)
if python_path is None:
return None
return os.path.join(python_path, "bin")
def _drop_and_prepend(xs, to_drop, to_prepend):
"""Filters a list in place (maybe), then prepend an element (maybe).
Args:
xs: A list to mutate in place.
to_drop: A string to remove from `xs` (all occurrences), or `None` to not
drop anything.
to_prepend: A string to prepend to `xs`, or `None` to not prepend anything.
"""
if to_drop is not None:
xs[:] = [x for x in xs if x != to_drop]
if to_prepend is not None:
xs.insert(0, to_prepend)
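# A small illustrative sketch of the semantics above (hypothetical paths;
# "/tensorflow-2" is made up): every occurrence of `to_drop` is removed,
# then `to_prepend` goes first.
def _example_drop_and_prepend():
  parts = ["/tensorflow-1.15.2", "/usr/lib", "/tensorflow-1.15.2"]
  _drop_and_prepend(parts, "/tensorflow-1.15.2", "/tensorflow-2")
  assert parts == ["/tensorflow-2", "/usr/lib"]
  return parts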
def _drop_and_prepend_env(key, to_drop, to_prepend, empty_includes_cwd):
"""Like `_drop_and_prepend_env`, but mutate an environment variable.
Args:
key: The environment variable to modify.
to_drop: A path component to remove from the environment variable (all
occurrences), or `None` to not drop anything.
to_prepend: A path component to prepend to the environment variable, or
`None` to not prepend anything.
empty_includes_cwd: Whether the semantics of the given environment variable
treat an unset or empty value as including the current working directory
(as with POSIX `$PATH`) or not (as with Python 3 `$PYTHONPATH`).
"""
env_value = os.environ.get(key, "")
if env_value:
parts = env_value.split(os.pathsep)
else:
parts = [""] if empty_includes_cwd else []
_drop_and_prepend(parts, to_drop, to_prepend)
os.environ[key] = os.pathsep.join(parts)
def _get_tf_version():
"""Get the current tensorflow version via pkg_resources."""
  # pkg_resources.get_distribution uses sys.path at the time pkg_resources
  # was imported, so we construct our own WorkingSet here.
tf_dist = pkg_resources.WorkingSet(sys.path).find(
pkg_resources.Requirement.parse("tensorflow"))
if tf_dist is None:
return None
return tf_dist.version
_instance = None
class _TFVersionManager(object):
"""Class that manages the TensorFlow version used by Colab."""
def __init__(self):
self._version = _DEFAULT_VERSION
self.explicitly_set = False
def _maybe_switch_tpu_version(self, version):
"""Switch the TPU TF version (if needed)."""
# Avoid forcing a kernel restart on users updating requests if they haven't
# yet used our TF magics.
import requests # pylint: disable=g-import-not-at-top
if "COLAB_TPU_ADDR" not in os.environ:
return
# See b/141173168 for why this path.
url = "http://{}:8475/requestversion/{}".format(
os.environ["COLAB_TPU_ADDR"].split(":")[0], version)
resp = requests.post(url)
if resp.status_code != 200:
print("Failed to switch the TPU to TF {}".format(version))
def _set_version(self, version):
"""Perform version change by manipulating PATH/PYTHONPATH."""
old_python_path = _get_python_path(self._version)
new_python_path = _get_python_path(version)
old_os_path = _get_os_path(self._version)
new_os_path = _get_os_path(version)
# Fix up `sys.path`, for Python imports within this process.
_drop_and_prepend(sys.path, old_python_path, new_python_path)
# Fix up `$PYTHONPATH`, for Python imports in subprocesses.
_drop_and_prepend_env(
"PYTHONPATH",
old_python_path,
new_python_path,
empty_includes_cwd=False)
# Fix up `$PATH`, for locations of subprocess binaries.
_drop_and_prepend_env(
"PATH", old_os_path, new_os_path, empty_includes_cwd=True)
tf_version = _get_tf_version()
self._maybe_switch_tpu_version(tf_version)
self._version = version
def current_version(self):
return self._version
def _tensorflow_version(line):
"""Implements the tensorflow_version line magic.
If no parameter is specified, prints the currently selected and available
TensorFlow versions. Otherwise, changes the selected version to the given
version, if it exists.
Args:
line: the version parameter or the empty string.
"""
line = line.strip()
current_version_name = _instance.current_version().name
version_names = [v.name for v in _VERSIONS.values()]
if not line:
print("Currently selected TF version: {}".format(current_version_name))
print("Available versions:\n* {}".format("\n* ".join(version_names)))
return
_instance.explicitly_set = True
if line == current_version_name:
# Nothing to do
return
if line not in version_names:
old_line = line
if line.startswith("1"):
line = _VERSIONS["1"].name
if line.startswith("2"):
line = _VERSIONS["2"].name
if line != old_line:
print(
textwrap.dedent("""\
`%tensorflow_version` only switches the major version: {versions}.
You set: `{old_line}`. This will be interpreted as: `{line}`.
""".format(
versions=" or ".join(version_names), old_line=old_line, line=line)))
if line not in version_names:
print("Unknown TensorFlow version: {}".format(line))
print("Currently selected TF version: {}".format(current_version_name))
print("Available versions:\n * {}".format("\n * ".join(version_names)))
return
if "tensorflow" in sys.modules:
print("TensorFlow is already loaded. Please restart the runtime to "
"change versions.")
else:
version = [v for v in _VERSIONS.values() if v.name == line][0]
_instance._set_version(version) # pylint: disable=protected-access
print("TensorFlow {} selected.".format(line))
def _explicitly_set():
if _instance is None:
return False
return _instance.explicitly_set
def _initialize():
global _instance
if _instance is not None:
raise TypeError("Initialize called multiple times.")
_instance = _TFVersionManager()
def _register_magics(ip):
_initialize()
ip.register_magic_function(
_tensorflow_version, magic_kind="line", magic_name="tensorflow_version")
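# Illustrative wiring sketch (assumes an IPython/Colab kernel): fetching the
# active shell and registering the magic makes `%tensorflow_version` usable.
def _example_register():
  from IPython import get_ipython  # available inside IPython kernels
  ip = get_ipython()
  if ip is not None:
    _register_magics(ip)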
|
|
"""TemplateEntity utility class."""
from __future__ import annotations
from collections.abc import Callable
import contextlib
import itertools
import logging
from typing import Any
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_ICON,
CONF_ICON_TEMPLATE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.core import CoreState, Event, callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
TrackTemplate,
TrackTemplateResult,
async_track_template_result,
)
from homeassistant.helpers.template import Template, result_as_boolean
from .const import (
CONF_ATTRIBUTE_TEMPLATES,
CONF_ATTRIBUTES,
CONF_AVAILABILITY,
CONF_AVAILABILITY_TEMPLATE,
CONF_PICTURE,
)
_LOGGER = logging.getLogger(__name__)
TEMPLATE_ENTITY_AVAILABILITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_AVAILABILITY): cv.template,
}
)
TEMPLATE_ENTITY_ICON_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ICON): cv.template,
}
)
TEMPLATE_ENTITY_COMMON_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ATTRIBUTES): vol.Schema({cv.string: cv.template}),
vol.Optional(CONF_AVAILABILITY): cv.template,
vol.Optional(CONF_ICON): cv.template,
vol.Optional(CONF_PICTURE): cv.template,
}
)
TEMPLATE_ENTITY_ATTRIBUTES_SCHEMA_LEGACY = vol.Schema(
{
vol.Optional(CONF_ATTRIBUTE_TEMPLATES, default={}): vol.Schema(
{cv.string: cv.template}
),
}
)
TEMPLATE_ENTITY_AVAILABILITY_SCHEMA_LEGACY = vol.Schema(
{
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
}
)
TEMPLATE_ENTITY_COMMON_SCHEMA_LEGACY = vol.Schema(
{
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
}
).extend(TEMPLATE_ENTITY_AVAILABILITY_SCHEMA_LEGACY.schema)
LEGACY_FIELDS = {
CONF_ICON_TEMPLATE: CONF_ICON,
CONF_ENTITY_PICTURE_TEMPLATE: CONF_PICTURE,
CONF_AVAILABILITY_TEMPLATE: CONF_AVAILABILITY,
CONF_ATTRIBUTE_TEMPLATES: CONF_ATTRIBUTES,
CONF_FRIENDLY_NAME: CONF_NAME,
}
def rewrite_common_legacy_to_modern_conf(
    entity_cfg: dict[str, Any], extra_legacy_fields: dict[str, str] | None = None
) -> dict[str, Any]:
"""Rewrite legacy config."""
entity_cfg = {**entity_cfg}
if extra_legacy_fields is None:
extra_legacy_fields = {}
for from_key, to_key in itertools.chain(
LEGACY_FIELDS.items(), extra_legacy_fields.items()
):
if from_key not in entity_cfg or to_key in entity_cfg:
continue
val = entity_cfg.pop(from_key)
if isinstance(val, str):
val = Template(val)
entity_cfg[to_key] = val
if CONF_NAME in entity_cfg and isinstance(entity_cfg[CONF_NAME], str):
entity_cfg[CONF_NAME] = Template(entity_cfg[CONF_NAME])
return entity_cfg
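# Illustrative sketch (hypothetical config dict): legacy keys are renamed to
# their modern counterparts and bare strings are wrapped in Template objects.
def _example_rewrite_legacy() -> dict[str, Any]:
    legacy = {CONF_ICON_TEMPLATE: "{{ 'mdi:lightbulb' }}", CONF_FRIENDLY_NAME: "Lamp"}
    modern = rewrite_common_legacy_to_modern_conf(legacy)
    # modern now maps CONF_ICON and CONF_NAME to Template instances
    return modern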
class _TemplateAttribute:
"""Attribute value linked to template result."""
def __init__(
self,
entity: Entity,
attribute: str,
template: Template,
        validator: Callable[[Any], Any] | None = None,
on_update: Callable[[Any], None] | None = None,
        none_on_template_error: bool = False,
) -> None:
"""Template attribute."""
self._entity = entity
self._attribute = attribute
self.template = template
self.validator = validator
self.on_update = on_update
self.async_update = None
self.none_on_template_error = none_on_template_error
@callback
def async_setup(self):
"""Config update path for the attribute."""
if self.on_update:
return
if not hasattr(self._entity, self._attribute):
raise AttributeError(f"Attribute '{self._attribute}' does not exist.")
self.on_update = self._default_update
@callback
def _default_update(self, result):
attr_result = None if isinstance(result, TemplateError) else result
setattr(self._entity, self._attribute, attr_result)
@callback
def handle_result(
self,
event: Event | None,
template: Template,
last_result: str | None | TemplateError,
result: str | TemplateError,
) -> None:
"""Handle a template result event callback."""
if isinstance(result, TemplateError):
_LOGGER.error(
"TemplateError('%s') "
"while processing template '%s' "
"for attribute '%s' in entity '%s'",
result,
self.template,
self._attribute,
self._entity.entity_id,
)
if self.none_on_template_error:
self._default_update(result)
else:
assert self.on_update
self.on_update(result)
return
if not self.validator:
assert self.on_update
self.on_update(result)
return
try:
validated = self.validator(result)
except vol.Invalid as ex:
_LOGGER.error(
"Error validating template result '%s' "
"from template '%s' "
"for attribute '%s' in entity %s "
"validation message '%s'",
result,
self.template,
self._attribute,
self._entity.entity_id,
ex.msg,
)
assert self.on_update
self.on_update(None)
return
assert self.on_update
self.on_update(validated)
return
class TemplateEntity(Entity):
"""Entity that uses templates to calculate attributes."""
_attr_available = True
_attr_entity_picture = None
_attr_icon = None
_attr_should_poll = False
def __init__(
self,
hass,
*,
availability_template=None,
icon_template=None,
entity_picture_template=None,
attribute_templates=None,
config=None,
fallback_name=None,
unique_id=None,
):
"""Template Entity."""
self._template_attrs = {}
self._async_update = None
self._attr_extra_state_attributes = {}
self._self_ref_update_count = 0
self._attr_unique_id = unique_id
if config is None:
self._attribute_templates = attribute_templates
self._availability_template = availability_template
self._icon_template = icon_template
self._entity_picture_template = entity_picture_template
self._friendly_name_template = None
else:
self._attribute_templates = config.get(CONF_ATTRIBUTES)
self._availability_template = config.get(CONF_AVAILABILITY)
self._icon_template = config.get(CONF_ICON)
self._entity_picture_template = config.get(CONF_PICTURE)
self._friendly_name_template = config.get(CONF_NAME)
# Try to render the name as it can influence the entity ID
self._attr_name = fallback_name
if self._friendly_name_template:
self._friendly_name_template.hass = hass
with contextlib.suppress(TemplateError):
self._attr_name = self._friendly_name_template.async_render(
parse_result=False
)
        # Templates will not render while the entity is unavailable, so try to
        # render the icon and picture templates up front.
if self._entity_picture_template:
self._entity_picture_template.hass = hass
with contextlib.suppress(TemplateError):
self._attr_entity_picture = self._entity_picture_template.async_render(
parse_result=False
)
if self._icon_template:
self._icon_template.hass = hass
with contextlib.suppress(TemplateError):
self._attr_icon = self._icon_template.async_render(parse_result=False)
@callback
def _update_available(self, result):
if isinstance(result, TemplateError):
self._attr_available = True
return
self._attr_available = result_as_boolean(result)
@callback
def _update_state(self, result):
if self._availability_template:
return
self._attr_available = not isinstance(result, TemplateError)
@callback
def _add_attribute_template(self, attribute_key, attribute_template):
"""Create a template tracker for the attribute."""
def _update_attribute(result):
attr_result = None if isinstance(result, TemplateError) else result
self._attr_extra_state_attributes[attribute_key] = attr_result
self.add_template_attribute(
attribute_key, attribute_template, None, _update_attribute
)
def add_template_attribute(
self,
attribute: str,
template: Template,
        validator: Callable[[Any], Any] | None = None,
on_update: Callable[[Any], None] | None = None,
none_on_template_error: bool = False,
) -> None:
"""
Call in the constructor to add a template linked to a attribute.
Parameters
----------
attribute
The name of the attribute to link to. This attribute must exist
unless a custom on_update method is supplied.
template
The template to calculate.
validator
Validator function to parse the result and ensure it's valid.
on_update
Called to store the template result rather than storing it
the supplied attribute. Passed the result of the validator, or None
if the template or validator resulted in an error.
"""
assert self.hass is not None, "hass cannot be None"
template.hass = self.hass
template_attribute = _TemplateAttribute(
self, attribute, template, validator, on_update, none_on_template_error
)
self._template_attrs.setdefault(template, [])
self._template_attrs[template].append(template_attribute)
@callback
def _handle_results(
self,
event: Event | None,
updates: list[TrackTemplateResult],
) -> None:
"""Call back the results to the attributes."""
if event:
self.async_set_context(event.context)
entity_id = event and event.data.get(ATTR_ENTITY_ID)
if entity_id and entity_id == self.entity_id:
self._self_ref_update_count += 1
else:
self._self_ref_update_count = 0
if self._self_ref_update_count > len(self._template_attrs):
for update in updates:
_LOGGER.warning(
"Template loop detected while processing event: %s, skipping template render for Template[%s]",
event,
update.template.template,
)
return
for update in updates:
for attr in self._template_attrs[update.template]:
attr.handle_result(
event, update.template, update.last_result, update.result
)
self.async_write_ha_state()
async def _async_template_startup(self, *_) -> None:
template_var_tups: list[TrackTemplate] = []
has_availability_template = False
for template, attributes in self._template_attrs.items():
template_var_tup = TrackTemplate(template, None)
is_availability_template = False
for attribute in attributes:
# pylint: disable-next=protected-access
if attribute._attribute == "_attr_available":
has_availability_template = True
is_availability_template = True
attribute.async_setup()
# Insert the availability template first in the list
if is_availability_template:
template_var_tups.insert(0, template_var_tup)
else:
template_var_tups.append(template_var_tup)
result_info = async_track_template_result(
self.hass,
template_var_tups,
self._handle_results,
has_super_template=has_availability_template,
)
self.async_on_remove(result_info.async_remove)
self._async_update = result_info.async_refresh
result_info.async_refresh()
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
if self._availability_template is not None:
self.add_template_attribute(
"_attr_available",
self._availability_template,
None,
self._update_available,
)
if self._attribute_templates is not None:
for key, value in self._attribute_templates.items():
self._add_attribute_template(key, value)
if self._icon_template is not None:
self.add_template_attribute(
"_attr_icon", self._icon_template, vol.Or(cv.whitespace, cv.icon)
)
if self._entity_picture_template is not None:
self.add_template_attribute(
"_attr_entity_picture", self._entity_picture_template
)
if (
self._friendly_name_template is not None
and not self._friendly_name_template.is_static
):
self.add_template_attribute("_attr_name", self._friendly_name_template)
if self.hass.state == CoreState.running:
await self._async_template_startup()
return
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, self._async_template_startup
)
async def async_update(self) -> None:
"""Call for forced update."""
self._async_update()
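# Illustrative subclass sketch (assumes a running Home Assistant instance
# providing `hass`); it binds the icon attribute to a template, mirroring
# the add_template_attribute() pattern used above.
class _ExampleTemplateEntity(TemplateEntity):
    def __init__(self, hass):
        super().__init__(hass, fallback_name="example")
    async def async_added_to_hass(self) -> None:
        # hass is guaranteed to be set at this point, so templates can be added
        self.add_template_attribute(
            "_attr_icon", Template("{{ 'mdi:lightbulb' }}"), cv.icon
        )
        await super().async_added_to_hass()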
|
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.instrument import Instrument
from befh.util import Logger
import time
import threading
from functools import partial
from datetime import datetime
class ExchGwKrakenRestfulApi(RESTfulApiSocket):
"""
Exchange socket
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_order_book_link(cls, instmt):
return 'https://api.kraken.com/0/public/Depth?pair=%s&count=5' % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if instmt.get_exch_trade_id() != '' and instmt.get_exch_trade_id() != '0':
return 'https://api.kraken.com/0/public/Trades?pair=%s&since=%s' % \
(instmt.get_instmt_code(), instmt.get_exch_trade_id())
else:
return 'https://api.kraken.com/0/public/Trades?pair=%s' % instmt.get_instmt_code()
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
for i in range(0, len(bids)):
l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
for i in range(0, len(asks)):
l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
# Trade price
trade.trade_price = float(str(raw[0]))
# Trade volume
trade.trade_volume = float(str(raw[1]))
# Timestamp
date_time = float(raw[2])
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = Trade.parse_side(raw[3])
# Trade id
trade.trade_id = trade.date_time + '-' + str(instmt.get_exch_trade_id())
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0 and 'error' in res and len(res['error']) == 0:
res = list(res['result'].values())[0]
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
Logger.error(cls.__name__, "Cannot parse the order book. Return:\n%s" % res)
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:param trade_id: Trade id
:return: List of trades
"""
res = cls.request(cls.get_trades_link(instmt))
trades = []
if len(res) > 0 and 'error' in res and len(res['error']) == 0:
res = res['result']
if 'last' in res.keys():
instmt.set_exch_trade_id(res['last'])
del res['last']
res = list(res.values())[0]
for t in res:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwKraken(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwKrakenRestfulApi(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Kraken'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.l2_depth.copy())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__,
"Error in order book: %s" % e)
time.sleep(0.5)
def get_trades_worker(self, instmt):
"""
Get order book worker thread
:param instmt: Instrument name
"""
instmt.set_recovered(False)
        while True:
            ret = None  # Keep defined for the error log in the except block
            try:
                ret = self.api_socket.get_trades(instmt)
for trade in ret:
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
except Exception as e:
Logger.error(self.__class__.__name__,
"Error in trades: %s\nReturn: %s" % (e, ret))
time.sleep(0.5)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t1.start()
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t2.start()
return [t1, t2]
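# Illustrative start-up sketch (assumptions: `db_clients` is a list of
# configured befh database clients and `instmt` is an Instrument set up for
# a Kraken pair such as 'XXBTZUSD').
def _example_start_kraken(db_clients, instmt):
    gateway = ExchGwKraken(db_clients)
    threads = gateway.start(instmt)  # order-book and trades worker threads
    return threads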
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Bruno Cauet.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
from os import path, remove
from tempfile import mkdtemp
from shutil import rmtree
from mock import Mock, MagicMock
from beetsplug.smartplaylist import SmartPlaylistPlugin
from beets.library import Item, Album, parse_query_string
from beets.dbcore import OrQuery
from beets.dbcore.query import NullSort, MultipleSort, FixedFieldSort
from beets.util import syspath, bytestring_path, py3_path
from beets.ui import UserError
from beets import config
from test._common import unittest
from test.helper import TestHelper
class SmartPlaylistTest(unittest.TestCase):
def test_build_queries(self):
spl = SmartPlaylistPlugin()
self.assertEqual(spl._matched_playlists, None)
self.assertEqual(spl._unmatched_playlists, None)
config['smartplaylist']['playlists'].set([])
spl.build_queries()
self.assertEqual(spl._matched_playlists, set())
self.assertEqual(spl._unmatched_playlists, set())
config['smartplaylist']['playlists'].set([
{'name': u'foo',
'query': u'FOO foo'},
{'name': u'bar',
'album_query': [u'BAR bar1', u'BAR bar2']},
{'name': u'baz',
'query': u'BAZ baz',
'album_query': u'BAZ baz'}
])
spl.build_queries()
self.assertEqual(spl._matched_playlists, set())
foo_foo = parse_query_string(u'FOO foo', Item)
baz_baz = parse_query_string(u'BAZ baz', Item)
baz_baz2 = parse_query_string(u'BAZ baz', Album)
bar_bar = OrQuery((parse_query_string(u'BAR bar1', Album)[0],
parse_query_string(u'BAR bar2', Album)[0]))
self.assertEqual(spl._unmatched_playlists, set([
(u'foo', foo_foo, (None, None)),
(u'baz', baz_baz, baz_baz2),
(u'bar', (None, None), (bar_bar, None)),
]))
def test_build_queries_with_sorts(self):
spl = SmartPlaylistPlugin()
config['smartplaylist']['playlists'].set([
{'name': u'no_sort',
'query': u'foo'},
{'name': u'one_sort',
'query': u'foo year+'},
{'name': u'only_empty_sorts',
'query': [u'foo', u'bar']},
{'name': u'one_non_empty_sort',
'query': [u'foo year+', u'bar']},
{'name': u'multiple_sorts',
'query': [u'foo year+', u'bar genre-']},
{'name': u'mixed',
'query': [u'foo year+', u'bar', u'baz genre+ id-']}
])
spl.build_queries()
sorts = dict((name, sort)
for name, (_, sort), _ in spl._unmatched_playlists)
asseq = self.assertEqual # less cluttered code
        sort = FixedFieldSort  # shortcut since we're only dealing with this
asseq(sorts["no_sort"], NullSort())
asseq(sorts["one_sort"], sort(u'year'))
asseq(sorts["only_empty_sorts"], None)
asseq(sorts["one_non_empty_sort"], sort(u'year'))
asseq(sorts["multiple_sorts"],
MultipleSort([sort('year'), sort(u'genre', False)]))
asseq(sorts["mixed"],
MultipleSort([sort('year'), sort(u'genre'), sort(u'id', False)]))
def test_matches(self):
spl = SmartPlaylistPlugin()
a = MagicMock(Album)
i = MagicMock(Item)
self.assertFalse(spl.matches(i, None, None))
self.assertFalse(spl.matches(a, None, None))
query = Mock()
query.match.side_effect = {i: True}.__getitem__
self.assertTrue(spl.matches(i, query, None))
self.assertFalse(spl.matches(a, query, None))
a_query = Mock()
a_query.match.side_effect = {a: True}.__getitem__
self.assertFalse(spl.matches(i, None, a_query))
self.assertTrue(spl.matches(a, None, a_query))
self.assertTrue(spl.matches(i, query, a_query))
self.assertTrue(spl.matches(a, query, a_query))
def test_db_changes(self):
spl = SmartPlaylistPlugin()
nones = None, None
pl1 = '1', (u'q1', None), nones
pl2 = '2', (u'q2', None), nones
pl3 = '3', (u'q3', None), nones
spl._unmatched_playlists = set([pl1, pl2, pl3])
spl._matched_playlists = set()
spl.matches = Mock(return_value=False)
spl.db_change(None, u"nothing")
self.assertEqual(spl._unmatched_playlists, set([pl1, pl2, pl3]))
self.assertEqual(spl._matched_playlists, set())
spl.matches.side_effect = lambda _, q, __: q == u'q3'
spl.db_change(None, u"matches 3")
self.assertEqual(spl._unmatched_playlists, set([pl1, pl2]))
self.assertEqual(spl._matched_playlists, set([pl3]))
spl.matches.side_effect = lambda _, q, __: q == u'q1'
spl.db_change(None, u"matches 3")
self.assertEqual(spl._matched_playlists, set([pl1, pl3]))
self.assertEqual(spl._unmatched_playlists, set([pl2]))
def test_playlist_update(self):
spl = SmartPlaylistPlugin()
i = Mock(path=b'/tagada.mp3')
i.evaluate_template.side_effect = lambda x, _: x
q = Mock()
a_q = Mock()
lib = Mock()
lib.items.return_value = [i]
lib.albums.return_value = []
pl = b'my_playlist.m3u', (q, None), (a_q, None)
spl._matched_playlists = [pl]
dir = bytestring_path(mkdtemp())
config['smartplaylist']['relative_to'] = False
config['smartplaylist']['playlist_dir'] = py3_path(dir)
try:
spl.update_playlists(lib)
except Exception:
rmtree(dir)
raise
lib.items.assert_called_once_with(q, None)
lib.albums.assert_called_once_with(a_q, None)
m3u_filepath = path.join(dir, pl[0])
self.assertTrue(path.exists(m3u_filepath))
with open(syspath(m3u_filepath), 'rb') as f:
content = f.read()
rmtree(dir)
self.assertEqual(content, b'/tagada.mp3\n')
class SmartPlaylistCLITest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.item = self.add_item()
config['smartplaylist']['playlists'].set([
{'name': 'my_playlist.m3u',
'query': self.item.title},
{'name': 'all.m3u',
'query': u''}
])
config['smartplaylist']['playlist_dir'].set(py3_path(self.temp_dir))
self.load_plugins('smartplaylist')
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def test_splupdate(self):
with self.assertRaises(UserError):
self.run_with_output(u'splupdate', u'tagada')
self.run_with_output(u'splupdate', u'my_playlist')
m3u_path = path.join(self.temp_dir, b'my_playlist.m3u')
self.assertTrue(path.exists(m3u_path))
with open(m3u_path, 'rb') as f:
self.assertEqual(f.read(), self.item.path + b"\n")
remove(m3u_path)
self.run_with_output(u'splupdate', u'my_playlist.m3u')
with open(m3u_path, 'rb') as f:
self.assertEqual(f.read(), self.item.path + b"\n")
remove(m3u_path)
self.run_with_output(u'splupdate')
for name in (b'my_playlist.m3u', b'all.m3u'):
with open(path.join(self.temp_dir, name), 'rb') as f:
self.assertEqual(f.read(), self.item.path + b"\n")
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# -*- coding: utf-8 -*-
"""
DICS for power mapping
======================
In this tutorial, we'll simulate two signals originating from two
locations on the cortex. These signals will be sinusoids, so we'll be looking
at oscillatory activity (as opposed to evoked activity).
We'll use dynamic imaging of coherent sources (DICS) [1]_ to map out
spectral power along the cortex. Let's see if we can find our two simulated
sources.
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
###############################################################################
# Setup
# -----
# We first import the required packages to run this tutorial and define a list
# of filenames for various things we'll be using.
import os.path as op
import numpy as np
from scipy.signal import welch, coherence, unit_impulse
from matplotlib import pyplot as plt
import mne
from mne.simulation import simulate_raw, add_noise
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
# We use the MEG and MRI setup from the MNE-sample dataset
data_path = sample.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
# Filenames for various files we'll be using
meg_path = op.join(data_path, 'MEG', 'sample')
raw_fname = op.join(meg_path, 'sample_audvis_raw.fif')
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
cov_fname = op.join(meg_path, 'sample_audvis-cov.fif')
fwd = mne.read_forward_solution(fwd_fname)
# Seed for the random number generator
rand = np.random.RandomState(42)
###############################################################################
# Data simulation
# ---------------
#
# The following function generates a timeseries that contains an oscillator,
# whose frequency fluctuates a little over time, but stays close to 10 Hz.
# We'll use this function to generate our two signals.
sfreq = 50. # Sampling frequency of the generated signal
n_samp = int(round(10. * sfreq))
times = np.arange(n_samp) / sfreq # 10 seconds of signal
n_times = len(times)
def coh_signal_gen():
"""Generate an oscillating signal.
Returns
-------
signal : ndarray
The generated signal.
"""
t_rand = 0.001 # Variation in the instantaneous frequency of the signal
std = 0.1 # Std-dev of the random fluctuations added to the signal
base_freq = 10. # Base frequency of the oscillators in Hertz
n_times = len(times)
# Generate an oscillator with varying frequency and phase lag.
signal = np.sin(2.0 * np.pi *
(base_freq * np.arange(n_times) / sfreq +
np.cumsum(t_rand * rand.randn(n_times))))
# Add some random fluctuations to the signal.
signal += std * rand.randn(n_times)
# Scale the signal to be in the right order of magnitude (~100 nAm)
# for MEG data.
signal *= 100e-9
return signal
###############################################################################
# Let's simulate two timeseries and plot some basic information about them.
signal1 = coh_signal_gen()
signal2 = coh_signal_gen()
fig, axes = plt.subplots(2, 2, figsize=(8, 4))
# Plot the timeseries
ax = axes[0][0]
ax.plot(times, 1e9 * signal1, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], ylabel='Amplitude (Am)',
title='Signal 1')
ax = axes[0][1]
ax.plot(times, 1e9 * signal2, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2')
# Power spectrum of the first timeseries
f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)
ax = axes[1][0]
# Only plot the first 100 frequencies
ax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 99]],
ylabel='Power (dB)', title='Power spectrum of signal 1')
# Compute the coherence between the two timeseries
f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)
ax = axes[1][1]
ax.plot(f[:50], coh[:50], lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 49]], ylabel='Coherence',
title='Coherence between the timeseries')
fig.tight_layout()
###############################################################################
# Now we put the signals at two locations on the cortex. We construct a
# :class:`mne.SourceEstimate` object to store them in.
#
# The timeseries will have a part where the signal is active and a part where
# it is not. The techniques we'll be using in this tutorial depend on being
# able to contrast data that contains the signal of interest versus data that
# does not (i.e. it contains only noise).
# The locations on the cortex where the signal will originate from. These
# locations are indicated as vertex numbers.
vertices = [[146374], [33830]]
# Construct SourceEstimates that describe the signals at the cortical level.
data = np.vstack((signal1, signal2))
stc_signal = mne.SourceEstimate(
data, vertices, tmin=0, tstep=1. / sfreq, subject='sample')
stc_noise = stc_signal * 0.
###############################################################################
# Before we simulate the sensor-level data, let's define a signal-to-noise
# ratio. You are encouraged to play with this parameter and see the effect of
# noise on our results.
snr = 1. # Signal-to-noise ratio. Decrease to add more noise.
###############################################################################
# Now we run the signal through the forward model to obtain simulated sensor
# data. To save computation time, we'll only simulate gradiometer data. You can
# try simulating other types of sensors as well.
#
# Some noise is added based on the baseline noise covariance matrix from the
# sample dataset, scaled to implement the desired SNR.
# Read the info from the sample dataset. This defines the location of the
# sensors and such.
info = mne.io.read_info(raw_fname)
info.update(sfreq=sfreq, bads=[])
# Only use gradiometers
picks = mne.pick_types(info, meg='grad', stim=True, exclude=())
mne.pick_info(info, picks, copy=False)
# Define a covariance matrix for the simulated noise. In this tutorial, we use
# a simple diagonal matrix.
cov = mne.cov.make_ad_hoc_cov(info)
cov['data'] *= (20. / snr) ** 2 # Scale the noise to achieve the desired SNR
# Simulate the raw data, with a lowpass filter on the noise
stcs = [(stc_signal, unit_impulse(n_samp, dtype=int) * 1),
(stc_noise, unit_impulse(n_samp, dtype=int) * 2)] # stacked in time
duration = (len(stc_signal.times) * 2) / sfreq
raw = simulate_raw(info, stcs, forward=fwd)
add_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)
###############################################################################
# We create an :class:`mne.Epochs` object containing two trials: one with
# both noise and signal and one with just noise
events = mne.find_events(raw, initial_event=True)
tmax = (len(stc_signal.times) - 1) / sfreq
epochs = mne.Epochs(raw, events, event_id=dict(signal=1, noise=2),
tmin=0, tmax=tmax, baseline=None, preload=True)
assert len(epochs) == 2 # ensure that we got the two expected events
# Plot some of the channels of the simulated data that are situated above one
# of our simulated sources.
picks = mne.pick_channels(epochs.ch_names, mne.read_selection('Left-frontal'))
epochs.plot(picks=picks)
###############################################################################
# Power mapping
# -------------
# With our simulated dataset ready, we can now pretend to be researchers that
# have just recorded this from a real subject and are going to study what parts
# of the brain communicate with each other.
#
# First, we'll create a source estimate of the MEG data. We'll use both a
# straightforward MNE-dSPM inverse solution for this, and the DICS beamformer
# which is specifically designed to work with oscillatory data.
###############################################################################
# Computing the inverse using MNE-dSPM:
# Compute the inverse operator
fwd = mne.read_forward_solution(fwd_fname)
inv = make_inverse_operator(epochs.info, fwd, cov)
# Apply the inverse model to the trial that also contains the signal.
s = apply_inverse(epochs['signal'].average(), inv)
# Take the root-mean square along the time dimension and plot the result.
s_rms = np.sqrt((s ** 2).mean())
title = 'MNE-dSPM inverse (RMS)'
brain = s_rms.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=1,
size=600, time_label=title, title=title)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh')
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh')
# Rotate the view and add a title.
brain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550,
'focalpoint': [0, 0, 0]})
###############################################################################
# We will now compute the cortical power map at 10 Hz using a DICS beamformer.
# A beamformer will construct for each vertex a spatial filter that aims to
# pass activity originating from the vertex, while dampening activity from
# other sources as much as possible.
#
# The :func:`mne.beamformer.make_dics` function has many switches that offer
# precise control
# over the way the filter weights are computed. Currently, there is no clear
# consensus regarding the best approach. This is why we will demonstrate two
# approaches here:
#
# 1. The approach as described in [2]_, which first normalizes the forward
# solution and computes a vector beamformer.
# 2. The scalar beamforming approach based on [3]_, which uses weight
# normalization instead of normalizing the forward solution.
# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs['signal'], frequencies=[10])
# Compute the spatial filters for each vertex, using two approaches.
filters_approach1 = make_dics(
info, fwd, csd_signal, reg=0.05, pick_ori='max-power', normalize_fwd=True,
inversion='single', weight_norm=None)
print(filters_approach1)
filters_approach2 = make_dics(
info, fwd, csd_signal, reg=0.1, pick_ori='max-power', normalize_fwd=False,
inversion='matrix', weight_norm='unit-noise-gain')
print(filters_approach2)
# You can save these to disk with:
# filters_approach1.save('filters_1-dics.h5')
# Compute the DICS power map by applying the spatial filters to the CSD matrix.
power_approach1, f = apply_dics_csd(csd_signal, filters_approach1)
power_approach2, f = apply_dics_csd(csd_signal, filters_approach2)
# Plot the DICS power maps for both approaches.
for approach, power in enumerate([power_approach1, power_approach2], 1):
title = 'DICS power map, approach %d' % approach
brain = power.plot('sample', subjects_dir=subjects_dir, hemi='both',
figure=approach + 1, size=600, time_label=title,
title=title)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh')
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh')
# Rotate the view and add a title.
brain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550,
'focalpoint': [0, 0, 0]})
###############################################################################
# Excellent! All methods found our two simulated sources. Of course, with a
# signal-to-noise ratio (SNR) of 1, it isn't very hard to find them. You can
# try playing with the SNR and see how the MNE-dSPM and DICS approaches hold up
# in the presence of increasing noise. In the presence of more noise, you may
# need to increase the regularization parameter of the DICS beamformer.
#
# References
# ----------
# .. [1] Gross et al. (2001). Dynamic imaging of coherent sources: Studying
# neural interactions in the human brain. Proceedings of the National
# Academy of Sciences, 98(2), 694-699.
# https://doi.org/10.1073/pnas.98.2.694
# .. [2] van Vliet, et al. (2018) Analysis of functional connectivity and
# oscillatory power using DICS: from raw MEG data to group-level
# statistics in Python. bioRxiv, 245530. https://doi.org/10.1101/245530
# .. [3] Sekihara & Nagarajan. Adaptive spatial filters for electromagnetic
# brain imaging (2008) Springer Science & Business Media
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of Policy Engine For Tacker"""
import fixtures
import mock
import six
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
from six.moves.urllib import request as urlrequest
import tacker
from tacker.api.v1 import attributes
from tacker.common import exceptions
from tacker import context
from tacker import manager
from tacker.openstack.common import policy as common_policy
from tacker import policy
from tacker.tests import base
class PolicyFileTestCase(base.BaseTestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
policy.reset()
self.addCleanup(policy.reset)
self.context = context.Context('fake', 'fake', is_admin=False)
self.target = {}
self.tempdir = self.useFixture(fixtures.TempDir())
def test_modified_policy_reloads(self):
def fake_find_config_file(_1, _2):
return self.tempdir.join('policy')
with mock.patch.object(tacker.common.utils,
'find_config_file',
new=fake_find_config_file):
tmpfilename = fake_find_config_file(None, None)
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": ""}""")
policy.init()
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": "!"}""")
# NOTE(vish): reset stored policy cache so we don't have to
# sleep(1)
policy._POLICY_CACHE = {}
policy.init()
self.assertRaises(exceptions.PolicyNotAuthorized,
policy.enforce,
self.context,
action,
self.target)
class PolicyTestCase(base.BaseTestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
policy.reset()
self.addCleanup(policy.reset)
# NOTE(vish): preload rules to circumvent reloading from file
policy.init()
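        # Policy-language primer for the rules below: '@' always allows,
        # '!' always denies, 'role:x' checks a role on the request context,
        # 'tenant_id:%(tenant_id)s' compares a context attribute against the
        # target dict, and 'http:<url>' delegates the decision to a remote
        # server that replies "True" or "False".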
rules = {
"true": '@',
"example:allowed": '@',
"example:denied": '!',
"example:get_http": "http:http://www.example.com",
"example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s",
"example:early_and_fail": "! and @",
"example:early_or_success": "@ or !",
"example:lowercase_admin": "role:admin or role:sysadmin",
"example:uppercase_admin": "role:ADMIN or role:sysadmin",
}
# NOTE(vish): then overload underlying rules
common_policy.set_rules(common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in rules.items())))
self.context = context.Context('fake', 'fake', roles=['member'])
self.target = {}
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_check_bad_action_noraise(self):
action = "example:denied"
result = policy.check(self.context, action, self.target)
self.assertEqual(result, False)
def test_check_non_existent_action(self):
action = "example:idonotexist"
result_1 = policy.check(self.context, action, self.target)
self.assertFalse(result_1)
result_2 = policy.check(self.context, action, self.target,
might_not_exist=True)
self.assertTrue(result_2)
def test_enforce_good_action(self):
action = "example:allowed"
result = policy.enforce(self.context, action, self.target)
self.assertEqual(result, True)
def test_enforce_http_true(self):
def fakeurlopen(url, post_data):
return six.StringIO("True")
with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen):
action = "example:get_http"
target = {}
result = policy.enforce(self.context, action, target)
self.assertEqual(result, True)
def test_enforce_http_false(self):
def fakeurlopen(url, post_data):
return six.StringIO("False")
with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen):
action = "example:get_http"
target = {}
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_templatized_enforcement(self):
target_mine = {'tenant_id': 'fake'}
target_not_mine = {'tenant_id': 'another'}
action = "example:my_file"
policy.enforce(self.context, action, target_mine)
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, target_not_mine)
def test_early_AND_enforcement(self):
action = "example:early_and_fail"
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_early_OR_enforcement(self):
action = "example:early_or_success"
policy.enforce(self.context, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.Context('admin', 'fake', roles=['AdMiN'])
policy.enforce(admin_context, lowercase_action, self.target)
policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(base.BaseTestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.addCleanup(policy.reset)
self.rules = {
"default": '',
"example:exist": '!',
}
self._set_rules('default')
self.context = context.Context('fake', 'fake')
def _set_rules(self, default_rule):
rules = common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in self.rules.items()), default_rule)
common_policy.set_rules(rules)
def test_policy_called(self):
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.context, "example:noexist", {})
def test_default_not_found(self):
self._set_rules("default_noexist")
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, "example:noexist", {})
FAKE_RESOURCE_NAME = 'something'
FAKE_RESOURCE = {"%ss" % FAKE_RESOURCE_NAME:
{'attr': {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'enforce_policy': True,
'validate': {'type:dict':
{'sub_attr_1': {'type:string': None},
'sub_attr_2': {'type:string': None}}}
}}}
class TackerPolicyTestCase(base.BaseTestCase):
def setUp(self):
super(TackerPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.addCleanup(policy.reset)
self.admin_only_legacy = "role:admin"
self.admin_or_owner_legacy = "role:admin or tenant_id:%(tenant_id)s"
# Add a Fake 'something' resource to RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCE)
self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
"context_is_admin": "role:admin",
"admin_or_network_owner": "rule:context_is_admin or "
"tenant_id:%(network:tenant_id)s",
"admin_or_owner": ("rule:context_is_admin or "
"tenant_id:%(tenant_id)s"),
"admin_only": "rule:context_is_admin",
"regular_user": "role:user",
"shared": "field:networks:shared=True",
"external": "field:networks:router:external=True",
"default": '@',
"create_network": "rule:admin_or_owner",
"create_network:shared": "rule:admin_only",
"update_network": '@',
"update_network:shared": "rule:admin_only",
"get_network": "rule:admin_or_owner or "
"rule:shared or "
"rule:external",
"create_port:mac": "rule:admin_or_network_owner",
"create_something": "rule:admin_or_owner",
"create_something:attr": "rule:admin_or_owner",
"create_something:attr:sub_attr_1": "rule:admin_or_owner",
"create_something:attr:sub_attr_2": "rule:admin_only",
"get_firewall_policy": "rule:admin_or_owner or "
"rule:shared",
"get_firewall_rule": "rule:admin_or_owner or "
"rule:shared"
}.items())
def fakepolicyinit():
common_policy.set_rules(common_policy.Rules(self.rules))
def remove_fake_resource():
del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME]
self.patcher = mock.patch.object(tacker.policy,
'init',
new=fakepolicyinit)
self.patcher.start()
self.addCleanup(remove_fake_resource)
self.context = context.Context('fake', 'fake', roles=['user'])
plugin_klass = importutils.import_class(
"tacker.db.db_base_plugin_v2.TackerDbPluginV2")
self.manager_patcher = mock.patch('tacker.manager.TackerManager')
fake_manager = self.manager_patcher.start()
fake_manager_instance = fake_manager.return_value
fake_manager_instance.plugin = plugin_klass()
def _test_action_on_attr(self, context, action, attr, value,
exception=None):
action = "%s_network" % action
target = {'tenant_id': 'the_owner', attr: value}
if exception:
self.assertRaises(exception, policy.enforce,
context, action, target)
else:
result = policy.enforce(context, action, target)
self.assertEqual(result, True)
def _test_nonadmin_action_on_attr(self, action, attr, value,
exception=None):
user_context = context.Context('', "user", roles=['user'])
self._test_action_on_attr(user_context, action, attr,
value, exception)
def test_nonadmin_write_on_private_fails(self):
self._test_nonadmin_action_on_attr('create', 'shared', False,
exceptions.PolicyNotAuthorized)
def test_nonadmin_read_on_private_fails(self):
self._test_nonadmin_action_on_attr('get', 'shared', False,
exceptions.PolicyNotAuthorized)
def test_nonadmin_write_on_shared_fails(self):
self._test_nonadmin_action_on_attr('create', 'shared', True,
exceptions.PolicyNotAuthorized)
def test_nonadmin_read_on_shared_succeeds(self):
self._test_nonadmin_action_on_attr('get', 'shared', True)
def _test_enforce_adminonly_attribute(self, action):
admin_context = context.get_admin_context()
target = {'shared': True}
result = policy.enforce(admin_context, action, target)
self.assertEqual(result, True)
def test_enforce_adminonly_attribute_create(self):
self._test_enforce_adminonly_attribute('create_network')
def test_enforce_adminonly_attribute_update(self):
self._test_enforce_adminonly_attribute('update_network')
def test_enforce_adminonly_attribute_no_context_is_admin_policy(self):
del self.rules[policy.ADMIN_CTX_POLICY]
self.rules['admin_only'] = common_policy.parse_rule(
self.admin_only_legacy)
self.rules['admin_or_owner'] = common_policy.parse_rule(
self.admin_or_owner_legacy)
self._test_enforce_adminonly_attribute('create_network')
def test_enforce_adminonly_attribute_nonadminctx_returns_403(self):
action = "create_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_enforce_adminonly_nonadminctx_no_ctx_is_admin_policy_403(self):
del self.rules[policy.ADMIN_CTX_POLICY]
self.rules['admin_only'] = common_policy.parse_rule(
self.admin_only_legacy)
self.rules['admin_or_owner'] = common_policy.parse_rule(
self.admin_or_owner_legacy)
action = "create_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def _test_build_subattribute_match_rule(self, validate_value):
bk = FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate']
FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = (
validate_value)
action = "create_something"
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
self.assertFalse(policy._build_subattr_match_rule(
'attr',
FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr'],
action,
target))
FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk
def test_build_subattribute_match_rule_empty_dict_validator(self):
self._test_build_subattribute_match_rule({})
def test_build_subattribute_match_rule_wrong_validation_info(self):
self._test_build_subattribute_match_rule(
{'type:dict': 'wrong_stuff'})
def test_enforce_subattribute(self):
action = "create_something"
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
result = policy.enforce(self.context, action, target, None)
self.assertEqual(result, True)
def test_enforce_admin_only_subattribute(self):
action = "create_something"
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
'sub_attr_2': 'y'}}
result = policy.enforce(context.get_admin_context(),
action, target, None)
self.assertEqual(result, True)
def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self):
action = "create_something"
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
'sub_attr_2': 'y'}}
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
self.context, action, target, None)
def test_enforce_regularuser_on_read(self):
action = "get_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_firewall_policy_shared(self):
action = "get_firewall_policy"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_firewall_rule_shared(self):
action = "get_firewall_rule"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_tenant_id_check(self):
# Trigger a policy with rule admin_or_owner
action = "create_network"
target = {'tenant_id': 'fake'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_tenant_id_check_parent_resource(self):
def fakegetnetwork(*args, **kwargs):
return {'tenant_id': 'fake'}
action = "create_port:mac"
with mock.patch.object(manager.TackerManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_plugin_failure(self):
def fakegetnetwork(*args, **kwargs):
raise NotImplementedError('Blast!')
        # the policy check and plugin method used in this test are irrelevant,
        # so long as we verify that, when the plugin method blows up, the
        # policy engine still propagates the exception
action = "create_port:mac"
with mock.patch.object(manager.TackerManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
self.assertRaises(NotImplementedError,
policy.enforce,
self.context,
action,
target)
def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self):
def fakegetnetwork(*args, **kwargs):
return {'tenant_id': 'fake'}
del self.rules['admin_or_network_owner']
self.rules['admin_or_network_owner'] = common_policy.parse_rule(
"role:admin or tenant_id:%(network_tenant_id)s")
action = "create_port:mac"
with mock.patch.object(manager.TackerManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_tenant_id_check_no_target_field_raises(self):
# Try and add a bad rule
self.assertRaises(
exceptions.PolicyInitError,
common_policy.parse_rule,
'tenant_id:(wrong_stuff)')
def _test_enforce_tenant_id_raises(self, bad_rule):
self.rules['admin_or_owner'] = common_policy.parse_rule(bad_rule)
# Trigger a policy with rule admin_or_owner
action = "create_network"
target = {'tenant_id': 'fake'}
policy.init()
self.assertRaises(exceptions.PolicyCheckError,
policy.enforce,
self.context, action, target)
def test_enforce_tenant_id_check_malformed_target_field_raises(self):
self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s')
def test_enforce_tenant_id_check_invalid_parent_resource_raises(self):
self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s')
def test_get_roles_context_is_admin_rule_missing(self):
rules = dict((k, common_policy.parse_rule(v)) for k, v in {
"some_other_rule": "role:admin",
}.items())
common_policy.set_rules(common_policy.Rules(rules))
        # the 'admin' role is expected for backward compatibility
self.assertEqual(['admin'], policy.get_admin_roles())
def test_get_roles_with_role_check(self):
rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "role:admin",
}.items())
common_policy.set_rules(common_policy.Rules(rules))
self.assertEqual(['admin'], policy.get_admin_roles())
def test_get_roles_with_rule_check(self):
rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "rule:some_other_rule",
"some_other_rule": "role:admin",
}.items())
common_policy.set_rules(common_policy.Rules(rules))
self.assertEqual(['admin'], policy.get_admin_roles())
def test_get_roles_with_or_check(self):
self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "rule:rule1 or rule:rule2",
"rule1": "role:admin_1",
"rule2": "role:admin_2"
}.items())
self.assertEqual(['admin_1', 'admin_2'],
policy.get_admin_roles())
def test_get_roles_with_other_rules(self):
self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
policy.ADMIN_CTX_POLICY: "role:xxx or other:value",
}.items())
self.assertEqual(['xxx'], policy.get_admin_roles())
def _test_set_rules_with_deprecated_policy(self, input_rules,
expected_rules):
policy._set_rules(json.dumps(input_rules))
# verify deprecated policy has been removed
for pol in input_rules.keys():
self.assertNotIn(pol, common_policy._rules)
# verify deprecated policy was correctly translated. Iterate
# over items for compatibility with unittest2 in python 2.6
for rule in expected_rules:
self.assertIn(rule, common_policy._rules)
self.assertEqual(str(common_policy._rules[rule]),
expected_rules[rule])
def test_set_rules_with_deprecated_view_policy(self):
self._test_set_rules_with_deprecated_policy(
{'extension:router:view': 'rule:admin_or_owner'},
{'get_network:router:external': 'rule:admin_or_owner'})
def test_set_rules_with_deprecated_set_policy(self):
expected_policies = ['create_network:provider:network_type',
'create_network:provider:physical_network',
'create_network:provider:segmentation_id',
'update_network:provider:network_type',
'update_network:provider:physical_network',
'update_network:provider:segmentation_id']
self._test_set_rules_with_deprecated_policy(
{'extension:provider_network:set': 'rule:admin_only'},
dict((policy, 'rule:admin_only') for policy in
expected_policies))
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source httplib connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close() and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
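# A minimal sketch of that pattern (names here are illustrative only):
#
#     source_resp.swift_conn.close()        # stop the backend sending more
#     while source_resp.read(chunk_size):   # drain anything already buffered
#         pass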
import mimetypes
import os
from swift import gettext_ as _
from random import shuffle
from time import time
from eventlet import Timeout
from swift.common.ring import Ring, FingerRing
from swift.common.utils import cache_from_env, get_logger, \
get_remote_client, split_path, config_true_value, generate_trans_id, \
affinity_key_function, affinity_locality_predicate
from swift.common.constraints import check_utf8
from swift.proxy.controllers import AccountController, ObjectController, \
ContainerController
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \
HTTPServerError, HTTPException, Request
class Application(object):
"""WSGI application for the proxy server."""
def __init__(self, conf, memcache=None, logger=None, account_ring=None,
container_ring=None, object_ring=None, storage_ring=None):
if conf is None:
conf = {}
if logger is None:
self.logger = get_logger(conf, log_route='proxy-server')
else:
self.logger = logger
swift_dir = conf.get('swift_dir', '/etc/swift')
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.put_queue_depth = int(conf.get('put_queue_depth', 10))
self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
self.trans_id_suffix = conf.get('trans_id_suffix', '')
self.error_suppression_interval = \
int(conf.get('error_suppression_interval', 60))
self.error_suppression_limit = \
int(conf.get('error_suppression_limit', 10))
self.recheck_container_existence = \
int(conf.get('recheck_container_existence', 60))
self.recheck_account_existence = \
int(conf.get('recheck_account_existence', 60))
self.allow_account_management = \
config_true_value(conf.get('allow_account_management', 'no'))
self.object_post_as_copy = \
config_true_value(conf.get('object_post_as_copy', 'true'))
try:
self.storage_ring = storage_ring or FingerRing(swift_dir, ring_name='storage')
except IOError:
self.storage_ring = None
self.storage_redirect = self.storage_ring and config_true_value(conf.get('storage_redirect', 'true'))
self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
self.account_ring = account_ring or Ring(swift_dir,
ring_name='account')
self.memcache = memcache
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
self.account_autocreate = \
config_true_value(conf.get('account_autocreate', 'no'))
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
'expiring_objects'
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
self.max_containers_per_account = \
int(conf.get('max_containers_per_account') or 0)
self.max_containers_whitelist = [
a.strip()
for a in conf.get('max_containers_whitelist', '').split(',')
if a.strip()]
self.deny_host_headers = [
host.strip() for host in
conf.get('deny_host_headers', '').split(',') if host.strip()]
self.rate_limit_after_segment = \
int(conf.get('rate_limit_after_segment', 10))
self.rate_limit_segments_per_sec = \
int(conf.get('rate_limit_segments_per_sec', 1))
self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
self.cors_allow_origin = [
a.strip()
for a in conf.get('cors_allow_origin', '').split(',')
if a.strip()]
self.node_timings = {}
self.timing_expiry = int(conf.get('timing_expiry', 300))
self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
self.allow_static_large_object = config_true_value(
conf.get('allow_static_large_object', 'true'))
self.max_large_object_get_time = float(
conf.get('max_large_object_get_time', '86400'))
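        # request_node_count may be an absolute count (e.g. '3') or a multiple
        # of the ring's replica count (e.g. the default '2 * replicas').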
value = conf.get('request_node_count', '2 * replicas').lower().split()
if len(value) == 1:
value = int(value[0])
self.request_node_count = lambda r: value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
value = int(value[0])
self.request_node_count = lambda r: value * r.replica_count
else:
raise ValueError(
'Invalid request_node_count value: %r' % ''.join(value))
try:
read_affinity = conf.get('read_affinity', '')
self.read_affinity_sort_key = affinity_key_function(read_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid read_affinity value: %r (%s)" %
(read_affinity, err.message))
try:
write_affinity = conf.get('write_affinity', '')
self.write_affinity_is_local_fn \
= affinity_locality_predicate(write_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid write_affinity value: %r (%s)" %
(write_affinity, err.message))
value = conf.get('write_affinity_node_count',
'2 * replicas').lower().split()
if len(value) == 1:
value = int(value[0])
self.write_affinity_node_count = lambda r: value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
value = int(value[0])
self.write_affinity_node_count = lambda r: value * r.replica_count
else:
raise ValueError(
'Invalid write_affinity_node_count value: %r' % ''.join(value))
swift_owner_headers = conf.get(
'swift_owner_headers',
'x-container-read, x-container-write, '
'x-container-sync-key, x-container-sync-to, '
'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2')
self.swift_owner_headers = [
name.strip()
for name in swift_owner_headers.split(',') if name.strip()]
def get_controller(self, path):
"""
Get the controller to handle a request.
:param path: path from request
:returns: tuple of (controller class, path dictionary)
:raises: ValueError (thrown by split_path) if given invalid path
"""
version, account, container, obj = split_path(path, 1, 4, True)
d = dict(version=version,
account_name=account,
container_name=container,
object_name=obj)
if obj and container and account:
return ObjectController, d
elif container and account:
return ContainerController, d
elif account and not container and not obj:
return AccountController, d
return None, d
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
if self.memcache is None:
self.memcache = cache_from_env(env)
req = self.update_request(Request(env))
return self.handle_request(req)(env, start_response)
except UnicodeError:
err = HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
return err(env, start_response)
except (Exception, Timeout):
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
def update_request(self, req):
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
return req
def handle_request(self, req):
"""
Entry point for proxy server.
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
try:
self.logger.set_statsd_prefix('proxy-server')
if req.content_length and req.content_length < 0:
self.logger.increment('errors')
return HTTPBadRequest(request=req,
body='Invalid Content-Length')
try:
if not check_utf8(req.path_info):
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
except UnicodeError:
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
controller, path_parts = self.get_controller(req.path)
p = req.path_info
if isinstance(p, unicode):
p = p.encode('utf-8')
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if not controller:
self.logger.increment('errors')
return HTTPPreconditionFailed(request=req, body='Bad URL')
if self.deny_host_headers and \
req.host.split(':')[0] in self.deny_host_headers:
return HTTPForbidden(request=req, body='Invalid host header')
self.logger.set_statsd_prefix('proxy-server.' +
controller.server_type.lower())
controller = controller(self, **path_parts)
if 'swift.trans_id' not in req.environ:
# if this wasn't set by an earlier middleware, set it now
trans_id = generate_trans_id(self.trans_id_suffix)
req.environ['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
req.headers['x-trans-id'] = req.environ['swift.trans_id']
controller.trans_id = req.environ['swift.trans_id']
self.logger.client_ip = get_remote_client(req)
try:
handler = getattr(controller, req.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
allowed_methods = getattr(controller, 'allowed_methods', set())
return HTTPMethodNotAllowed(
request=req, headers={'Allow': ', '.join(allowed_methods)})
if path_parts['version']:
req.path_info_pop()
if 'swift.authorize' in req.environ:
# We call authorize before the handler, always. If authorized,
                # we remove the swift.authorize hook so it isn't ever called
# again. If not authorized, we return the denial unless the
# controller's method indicates it'd like to gather more
# information and try again later.
resp = req.environ['swift.authorize'](req)
if not resp:
# No resp means authorized, no delayed recheck required.
del req.environ['swift.authorize']
else:
# Response indicates denial, but we might delay the denial
# and recheck later. If not delayed, return the error now.
if not getattr(handler, 'delay_denial', None):
return resp
# Save off original request method (GET, POST, etc.) in case it
# gets mutated during handling. This way logging can display the
# method the client actually sent.
req.environ['swift.orig_req_method'] = req.method
return handler(req)
except HTTPException as error_response:
return error_response
except (Exception, Timeout):
self.logger.exception(_('ERROR Unhandled exception in request'))
return HTTPServerError(request=req)
def sort_nodes(self, nodes):
'''
Sorts nodes in-place (and returns the sorted list) according to
the configured strategy. The default "sorting" is to randomly
shuffle the nodes. If the "timing" strategy is chosen, the nodes
are sorted according to the stored timing data.
'''
# In the case of timing sorting, shuffling ensures that close timings
        # (i.e. within the rounding resolution) won't prefer one over another.
# Python's sort is stable (http://wiki.python.org/moin/HowTo/Sorting/)
shuffle(nodes)
if self.sorting_method == 'timing':
now = time()
def key_func(node):
timing, expires = self.node_timings.get(node['ip'], (-1.0, 0))
return timing if expires > now else -1.0
nodes.sort(key=key_func)
elif self.sorting_method == 'affinity':
nodes.sort(key=self.read_affinity_sort_key)
return nodes
def set_node_timing(self, node, timing):
if self.sorting_method != 'timing':
return
now = time()
        timing = round(timing, 3)  # round timings to the millisecond
self.node_timings[node['ip']] = (timing, now + self.timing_expiry)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
conf = global_conf.copy()
conf.update(local_conf)
return Application(conf)
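# Typical paste.deploy wiring in proxy-server.conf (illustrative):
#
#     [app:proxy-server]
#     use = egg:swift#proxy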
|
|
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``dev_chef_server.py``
`Chef Server host device related functionality`
"""
import os
import time
from . import clissh
from . import entry_template
from . import loggers
class GenericChefServerHost(entry_template.GenericEntry):
"""Generic Chef Server host pattern class.
"""
class_logger = loggers.ClassLogger()
ipaddr = None
ssh_user = None
ssh_pass = None
ssh_port = 22
def __init__(self, config, opts):
"""Initialize GenericChefServerHost class.
"""
super(GenericChefServerHost, self).__init__(config, opts)
self.name = config.get('name', "noname")
self.id = config['id']
self.type = config['instance_type']
self.ipaddr = str(config.get('ip_host', self.__class__.ipaddr))
self.ssh_port = int(config.get('ip_port', self.__class__.ssh_port))
self.ssh_user = str(config.get('username', self.__class__.ssh_user))
self.ssh_pass = str(config.get('password', self.__class__.ssh_pass))
self.chef_repo_path = config['installed_path']
self.roles_list = []
if self.ipaddr and self.ssh_user and self.ssh_pass:
self.ssh = clissh.CLISSH(self.ipaddr, self.ssh_port, self.ssh_user, self.ssh_pass)
self.class_logger.info("Init Chef Server: {}".format(self.ipaddr))
self.config = config
self.opts = opts
self.status = False
def exec_cmd(self, command, from_repo_root=True, check_root=True, timeout=None):
"""Exec shell command with root privileges and print warning message in case StdErr isn't empty.
Args:
command(str): Command to be executed
            from_repo_root(bool): Run the command from the chef-repo directory
            check_root(bool): Prefix the command with sudo when the SSH user is not root
timeout(int): Max command execution time on chef server
Returns:
tuple (stdout, stderr, return code)
Examples::
env.chef[1].exec_cmd('ls -la')
"""
if check_root:
if self.ssh_user != "root":
command = "sudo " + command
if from_repo_root:
command = "cd '{0}'; {1}".format(self.chef_repo_path, command)
cmd_status = self.ssh.exec_command(command, timeout=timeout)
# Delay, to stabilize calls
time.sleep(1)
return cmd_status
def start(self, wait_on=True):
"""Mandatory method for environment specific classes.
"""
self.ssh.login(timeout=25)
def stop(self, with_cleanup=True):
"""Mandatory method for environment specific classes.
"""
self.ssh.close()
def cleanup(self):
"""Remove created configuration.
"""
pass
def create(self):
"""Start Chef server or get running one.
Notes:
            This is a mandatory method for all environment classes.
            The self.opts.get_only attribute also affects the logic of this method.
get_only is set in py.test command line options (read py.test --help for more information).
"""
self.start()
self.status = True
def destroy(self):
"""Stop or release Chef server.
Notes:
            This is a mandatory method for all environment classes.
            The self.opts.leave_on and get_only attributes also affect the logic of this method.
leave_on and get_only are set in py.test command line options (read py.test --help for more information).
"""
if not self.status:
self.class_logger.info("Skip id:{}({}) destroying because it already "
"has Off status.".format(self.id, self.name))
return
self.stop()
self.sanitize()
def sanitize(self):
"""Perform any necessary operations to leave environment in normal state.
"""
pass
def check(self):
"""Mandatory method for environment specific classes.
"""
pass
def set_role(self, src):
"""Put role file to Chef server chef-repo/roles dir and add it to Chef database.
"""
dst = os.path.join(self.chef_repo_path, 'roles', os.path.split(src)[-1])
self.class_logger.debug("Transfer generated role file to chef server.")
self.ssh.put_file(src, dst, proto="sftp")
self.exec_cmd("knife role from file '{}'".format(dst))
self.roles_list.append(dst)
def set_run_list(self, role_file, fqdn_hostname):
"""Set chosen JSON role file as target node run list.
"""
_cmd = 'knife node run_list set {} "role[{}]"'.format(fqdn_hostname,
os.path.splitext(role_file)[0])
self.exec_cmd(_cmd)
def bootstrap_node(self, switch_config, timeout=90):
"""Install chef client on target device.
"""
_cmd = "cd '{}'; knife bootstrap {} -V --bootstrap-template {} --environment {}".format(
self.chef_repo_path, switch_config['ip_host'],
self.config['distro'], self.config['environment'])
_alter = [('assword:', switch_config['sshtun_pass'], False, True)]
self.class_logger.info("Install chef client on device '{}'.".format(switch_config['name']))
self.ssh.open_shell()
self.ssh.shell_command(_cmd, alternatives=_alter, timeout=timeout)
self.ssh.close_shell()
def remove_role(self):
"""Cleanup generated role files on chef server.
"""
self.class_logger.info("Perform cleanup on chef server.")
for x in self.roles_list:
self.exec_cmd("knife role delete '{}' -y".format(
os.path.splitext(os.path.split(x)[-1])[0]))
# Remove role files generated during test run
self.exec_cmd("rm -f -- '{}'".format(x))
def delete_node(self, fqdn_hostname):
"""Delete node from chef database.
"""
self.exec_cmd('knife node delete -y {}'.format(fqdn_hostname))
self.exec_cmd('knife client delete -y {}'.format(fqdn_hostname))
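# Typical provisioning flow (illustrative; mirrors the exec_cmd example style):
#   chef = env.chef[1]
#   chef.set_role('/tmp/my_role.json')
#   chef.set_run_list('my_role.json', 'node1.example.com')
#   chef.bootstrap_node(switch_config)
#   ...
#   chef.delete_node('node1.example.com')
#   chef.remove_role()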
ENTRY_TYPE = "chef-settings"
INSTANCES = {"chef": GenericChefServerHost}
NAME = "chef"
|
|
import os
import subprocess
import re
objdump_path = "/usr/i386-linux-cgc/bin/objdump"
objdump_options = ["-d", "--insn-width=20"] #we use a long insn width so we can figure out the length of the insn
objdump_header_options = ["-h"]
dump_ext = ".dump"
bin_path = "bin"
build_path = "build"
cb = "YAN01_00016"
cb_path = os.path.join(bin_path, cb)
cb_dump_path = os.path.join(build_path, cb + dump_ext)
patched_path = os.path.join(bin_path, cb + "_patched")
patched_dump_path = os.path.join(build_path, cb + "_patched" + dump_ext)
def getDumps(inFile_path, outFile_path, dumpCmd_path, dumpCmd_options, bForce = False) :
if bForce or not os.path.exists(outFile_path) :
outFile = open(outFile_path, "w")
ret = 0
try :
ret = subprocess.call([dumpCmd_path] + dumpCmd_options + [inFile_path], stdout=outFile)
except OSError :
ret = -1
outFile.close()
if ret != 0 :
os.remove(outFile_path)
def getObjdumps(bForce = False) :
getDumps(cb_path, cb_dump_path, objdump_path, objdump_options, bForce)
getDumps(patched_path, patched_dump_path, objdump_path, objdump_options, bForce)
header_ext = ".header"
cb_header_path = os.path.join(build_path, cb + header_ext)
patched_header_path = os.path.join(build_path, cb + "_patched" + header_ext)
def getObjdumpHeaders(bForce = False) :
getDumps(cb_path, cb_header_path, objdump_path, objdump_header_options, bForce)
getDumps(patched_path, patched_header_path, objdump_path, objdump_header_options, bForce)
diff_ext = ".diff"
diffFile_path = os.path.join(build_path, cb + diff_ext)
def getDiffFile(bForce = False) :
if os.path.exists(diffFile_path) and not bForce:
return
if not os.path.exists(cb_dump_path) or not os.path.exists(patched_dump_path) or bForce:
getObjdumps(bForce)
assert os.path.exists(cb_dump_path) and os.path.exists(patched_dump_path)
outFile = open(diffFile_path, "w")
ret = 0
try :
ret = subprocess.call(["/usr/bin/diff", cb_dump_path, patched_dump_path], stdout=outFile)
except OSError :
ret = -1
outFile.close()
  if ret != 1 : #diff exits 1 when the files differ, which is what we expect here
os.remove(diffFile_path)
offsets_ext = ".off"
offsets_path = os.path.join(build_path, cb + offsets_ext)
def processDiffToOffsets(out_path, df_path, getDiffFunc, bForce = False) :
if os.path.exists(out_path) and not bForce :
return #nothing to do
if not os.path.exists(df_path) or bForce:
getDiffFunc(bForce)
assert os.path.exists(df_path)
  #now that we have the diff file, let's just process it
outFile = open(out_path, "w")
for l in open(df_path) :
mat = re.match("[\<\>]\s+([0-9a-fA-F]+):\s+([0-9a-fA-F ]+)\t", l)
if mat :
print "MATCH: " + l
outFile.write(mat.group(1) + ':' + mat.group(2) + '\n')
else :
print "NO MATCH: " + l
pass
outFile.close()
hexdump_path = "hexdump"
hexdump_options = ['-e', '" %08_ax:" 16/1 " %02x""\t\n"'] #address then colon then the raw bytes -similar to objdump output (notice the \t)
hd_diff_ext = ".hddiff"
hd_ext = ".hexdump"
hd_offsets_ext = ".hdoff"
hd_offsets_path = os.path.join(build_path, cb + hd_offsets_ext)
hd_diffFile_path = os.path.join(build_path, cb + hd_diff_ext)
cb_hd_path = os.path.join(build_path, cb + hd_ext)
patched_hd_path = os.path.join(build_path, cb + "_patched" + hd_ext)
def getHexdumps(bForce = False) :
getDumps(cb_path, cb_hd_path, hexdump_path, hexdump_options, bForce)
getDumps(patched_path, patched_hd_path, hexdump_path, hexdump_options, bForce)
def getHDDiffFile(bForce = False) :
if os.path.exists(hd_diffFile_path) and not bForce :
return
if not os.path.exists(cb_hd_path) or not os.path.exists(patched_hd_path) or bForce :
getHexdumps(bForce)
assert os.path.exists(cb_hd_path) and os.path.exists(patched_hd_path)
outFile = open(hd_diffFile_path, "w")
ret = 0
try :
ret = subprocess.call(["/usr/bin/diff", cb_hd_path, patched_hd_path], stdout=outFile)
except OSError :
ret = -1
outFile.close()
  if ret != 1 : #diff exits 1 when the files differ, which is what we expect here
os.remove(hd_diffFile_path)
def loadOffsets(off_path, diff_path, getDiffFunc, bForce = False) :
processDiffToOffsets(off_path, diff_path, getDiffFunc, bForce)
try :
if os.path.getsize(off_path) == 0 : #there MUST be differences
processDiffToOffsets(off_path, diff_path, getDiffFunc, True)
except OSError : #we get an OSError if the file doesn't exist
pass #nothing to do because we will check on the file afterwards
assert os.path.exists(off_path)
ret = {}
#now that we have the offsets file, just read it into a dict and return
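  #e.g. the line "80480a0: 90 90" yields ret["80480a0"] = 2 (two changed bytes)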
i = 1
for l in open(off_path) :
elems = l.split(':')
if len(elems) != 2 :
print "WARNING: Can't Process this line @ %u [%s]" % (i, l)
else :
k = elems[0]
v = len("".join(elems[1].split())) / 2
if k not in ret :
ret[k] = v
else :
if v > ret[k] :
ret[k] = v
i += 1
return ret
def loadObjdumpOffsets(bForce = False) :
return loadOffsets(offsets_path, diffFile_path, getDiffFile, bForce)
def loadHexdumpOffsets(bForce = False) :
return loadOffsets(hd_offsets_path, hd_diffFile_path, getHDDiffFile, bForce)
def getHexdumpDifferences(bForce = False) :
processDiffToOffsets(hd_offsets_path, hd_diffFile_path, getHDDiffFile, bForce)
try :
if os.path.getsize(hd_offsets_path) == 0 : #there MUST be differences
processDiffToOffsets(hd_offsets_path, hd_diffFile_path, getHDDiffFile, True)
except OSError : #we get an OSError if the file doesn't exist
pass #nothing to do because we will check on the file afterwards
assert os.path.exists(hd_offsets_path)
ret = {}
curOffsets = {}
#now that we have the offsets file, just read it into a dict and return
i = 1
for l in open(hd_offsets_path) :
elems = l.split(':')
if len(elems) != 2 :
print "WARNING: Can't Process this line @ %u [%s]" % (i, l)
else :
k = elems[0]
v = elems[1]
if k not in curOffsets :
curOffsets[k] = v
      else : #since it already exists, let's go through the values to see which bytes are different
ki = int(k,16) #let this except and die if necessary
va = v.split()
ca = curOffsets[k].split()
        assert len(va) == len(ca) #both lines should hold 16 bytes; a short final line is not handled here
j = 0
bCurDiff = False
baseAddr = ki
endAddr = ki
while j < len(va) :
if va[j] != ca[j] :
if bCurDiff :
#if they are still different then update the end Addr
#endAddr += 1 #already done automatically
pass
else :
#if they are different, but didn't used to be then update the base addr and set the flag
baseAddr = ki + j
bCurDiff = True
else :
#now if they are the same
if bCurDiff :
#if they used to be different, then that means we have new range so add to ret
ret[baseAddr] = endAddr - baseAddr
baseAddr = endAddr
else :
#if they used to be the same, then just update the base addr
baseAddr += 1
bCurDiff = False
j += 1
endAddr += 1
#if we are all the way here then we need to make sure that there wasn't some difference
# we missed at the end of the line
if bCurDiff :
ret[baseAddr] = endAddr - baseAddr
i += 1
return ret
def parseHeaderOffsets(filename) :
temp = {}
for l in open(filename) :
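    #objdump -h lines look like "  0 .text  00001000  08048000  08048000  00000074  2**4"
    #(illustrative values); groups: (1) name, (2) size, (3) VMA, (4) file offset, with the LMA column skipped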
mat = re.match("\s+\d+\s+(.[\w]+)\s+([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+[0-9a-fA-F]+\s+([0-9a-fA-F]+)", l)
if mat :
if mat.group(1) in temp :
print "WARNING: Somehow the section [%s] already exists?" % mat.group(1)
#try :
temp[mat.group(1)] = (int(mat.group(2),16), int(mat.group(3),16), int(mat.group(4),16))
#except ValueError : I want the exception to stop things
else :
pass
return temp
def loadObjdumpHeaderOffsets(bForce = False) :
getObjdumpHeaders(bForce)
try :
if os.path.getsize(cb_header_path) == 0 or os.path.getsize(patched_header_path) == 0 :
assert False
except OSError :
assert False
header1 = parseHeaderOffsets(cb_header_path)
header2 = parseHeaderOffsets(patched_header_path)
return header1, header2
'''
objOffsets = loadObjdumpOffsets()
hexOffsets = loadHexdumpOffsets()
print objOffsets
print hexOffsets
h1, h2 = loadObjdumpHeaderOffsets()
print h1
print h2
print getHexdumpDifferences()
'''
#now that we have the dumps (or should have the dumps) we can do a diff
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains GCP MLEngine operators.
"""
import logging
import re
import warnings
from typing import List, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.taskinstance import TaskInstance
from airflow.providers.google.cloud.hooks.mlengine import MLEngineHook
from airflow.utils.decorators import apply_defaults
log = logging.getLogger(__name__)
def _normalize_mlengine_job_id(job_id: str) -> str:
"""
Replaces invalid MLEngine job_id characters with '_'.
This also adds a leading 'z' in case job_id starts with an invalid
character.
:param job_id: A job_id str that may have invalid characters.
    :type job_id: str
:return: A valid job_id representation.
:rtype: str
"""
# Add a prefix when a job_id starts with a digit or a template
match = re.search(r'\d|\{{2}', job_id)
if match and match.start() == 0:
job = 'z_{}'.format(job_id)
else:
job = job_id
# Clean up 'bad' characters except templates
tracker = 0
cleansed_job_id = ''
for match in re.finditer(r'\{{2}.+?\}{2}', job):
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',
job[tracker:match.start()])
cleansed_job_id += job[match.start():match.end()]
tracker = match.end()
# Clean up last substring or the full string if no templates
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])
return cleansed_job_id
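# For example (illustrative): _normalize_mlengine_job_id('4test.job') returns
# 'z_4test_job', while template blocks such as '{{ ds_nodash }}' are kept intact.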
class MLEngineStartBatchPredictionJobOperator(BaseOperator):
"""
Start a Google Cloud ML Engine prediction job.
    NOTE: For model origin, users should provide exactly one of the
    three options below:
1. Populate ``uri`` field only, which should be a GCS location that
points to a tensorflow savedModel directory.
2. Populate ``model_name`` field only, which refers to an existing
model, and the default version of the model will be used.
3. Populate both ``model_name`` and ``version_name`` fields, which
refers to a specific version of a specific model.
In options 2 and 3, both model and version name should contain the
minimal identifier. For instance, call::
        MLEngineStartBatchPredictionJobOperator(
...,
model_name='my_model',
version_name='my_version',
...)
if the desired model version is
``projects/my_project/models/my_model/versions/my_version``.
See https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs
for further documentation on the parameters.
:param job_id: A unique id for the prediction job on Google Cloud
ML Engine. (templated)
:type job_id: str
:param data_format: The format of the input data.
        It will default to 'DATA_FORMAT_UNSPECIFIED' if it is not provided
or is not one of ["TEXT", "TF_RECORD", "TF_RECORD_GZIP"].
:type data_format: str
:param input_paths: A list of GCS paths of input data for batch
prediction. Accepting wildcard operator ``*``, but only at the end. (templated)
:type input_paths: list[str]
:param output_path: The GCS path where the prediction results are
written to. (templated)
:type output_path: str
:param region: The Google Compute Engine region to run the
prediction job in. (templated)
:type region: str
:param model_name: The Google Cloud ML Engine model to use for prediction.
If version_name is not provided, the default version of this
model will be used.
Should not be None if version_name is provided.
Should be None if uri is provided. (templated)
:type model_name: str
:param version_name: The Google Cloud ML Engine model version to use for
prediction.
Should be None if uri is provided. (templated)
:type version_name: str
:param uri: The GCS path of the saved model to use for prediction.
Should be None if model_name is provided.
It should be a GCS path pointing to a tensorflow SavedModel. (templated)
:type uri: str
:param max_worker_count: The maximum number of workers to be used
for parallel processing. Defaults to 10 if not specified. Should be a
string representing the worker count ("10" instead of 10, "50" instead
of 50, etc.)
:type max_worker_count: str
:param runtime_version: The Google Cloud ML Engine runtime version to use
for batch prediction.
:type runtime_version: str
:param signature_name: The name of the signature defined in the SavedModel
to use for this job.
:type signature_name: str
:param project_id: The Google Cloud project name where the prediction job is submitted.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID used for connection to Google
Cloud Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must
have domain-wide delegation enabled.
:type delegate_to: str
:raises: ``ValueError``: if a unique model/version origin cannot be
determined.
"""
template_fields = [
'_project_id',
'_job_id',
'_region',
'_input_paths',
'_output_path',
'_model_name',
'_version_name',
'_uri',
]
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
job_id: str,
region: str,
data_format: str,
input_paths: List[str],
output_path: str,
model_name: Optional[str] = None,
version_name: Optional[str] = None,
uri: Optional[str] = None,
max_worker_count: Optional[int] = None,
runtime_version: Optional[str] = None,
signature_name: Optional[str] = None,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._job_id = job_id
self._region = region
self._data_format = data_format
self._input_paths = input_paths
self._output_path = output_path
self._model_name = model_name
self._version_name = version_name
self._uri = uri
self._max_worker_count = max_worker_count
self._runtime_version = runtime_version
self._signature_name = signature_name
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
if not self._project_id:
raise AirflowException('Google Cloud project id is required.')
if not self._job_id:
raise AirflowException(
                'A unique job id is required for Google MLEngine prediction '
'job.')
if self._uri:
if self._model_name or self._version_name:
raise AirflowException('Ambiguous model origin: Both uri and '
'model/version name are provided.')
if self._version_name and not self._model_name:
raise AirflowException(
'Missing model: Batch prediction expects '
'a model name when a version name is provided.')
if not (self._uri or self._model_name):
raise AirflowException(
'Missing model origin: Batch prediction expects a model, '
'a model & version combination, or a URI to a savedModel.')
def execute(self, context):
job_id = _normalize_mlengine_job_id(self._job_id)
prediction_request = {
'jobId': job_id,
'predictionInput': {
'dataFormat': self._data_format,
'inputPaths': self._input_paths,
'outputPath': self._output_path,
'region': self._region
}
}
if self._uri:
prediction_request['predictionInput']['uri'] = self._uri
elif self._model_name:
origin_name = 'projects/{}/models/{}'.format(
self._project_id, self._model_name)
if not self._version_name:
prediction_request['predictionInput'][
'modelName'] = origin_name
else:
prediction_request['predictionInput']['versionName'] = \
origin_name + '/versions/{}'.format(self._version_name)
if self._max_worker_count:
prediction_request['predictionInput'][
'maxWorkerCount'] = self._max_worker_count
if self._runtime_version:
prediction_request['predictionInput'][
'runtimeVersion'] = self._runtime_version
if self._signature_name:
prediction_request['predictionInput'][
'signatureName'] = self._signature_name
hook = MLEngineHook(self._gcp_conn_id, self._delegate_to)
# Helper method to check if the existing job's prediction input is the
# same as the request we get here.
def check_existing_job(existing_job):
return existing_job.get('predictionInput', None) == \
prediction_request['predictionInput']
finished_prediction_job = hook.create_job(
project_id=self._project_id, job=prediction_request, use_existing_job_fn=check_existing_job
)
if finished_prediction_job['state'] != 'SUCCEEDED':
self.log.error(
'MLEngine batch prediction job failed: %s', str(finished_prediction_job)
)
raise RuntimeError(finished_prediction_job['errorMessage'])
return finished_prediction_job['predictionOutput']
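# A minimal usage sketch (project, bucket and model names are illustrative):
#
#     predict = MLEngineStartBatchPredictionJobOperator(
#         task_id='batch_predict',
#         project_id='my-project',
#         job_id='batch_predict_{{ ds_nodash }}',
#         region='us-central1',
#         data_format='TEXT',
#         input_paths=['gs://my-bucket/inputs/*'],
#         output_path='gs://my-bucket/outputs/',
#         model_name='my_model',
#     )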
class MLEngineManageModelOperator(BaseOperator):
"""
Operator for managing a Google Cloud ML Engine model.
.. warning::
This operator is deprecated. Consider using operators for specific operations:
MLEngineCreateModelOperator, MLEngineGetModelOperator.
:param model: A dictionary containing the information about the model.
If the `operation` is `create`, then the `model` parameter should
contain all the information about this model such as `name`.
If the `operation` is `get`, the `model` parameter
should contain the `name` of the model.
:type model: dict
:param operation: The operation to perform. Available operations are:
* ``create``: Creates a new model as provided by the `model` parameter.
* ``get``: Gets a particular model where the name is specified in `model`.
:type operation: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model',
]
@apply_defaults
def __init__(self,
model: dict,
operation: str = 'create',
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
warnings.warn(
"This operator is deprecated. Consider using operators for specific operations: "
"MLEngineCreateModelOperator, MLEngineGetModelOperator.",
DeprecationWarning,
stacklevel=3
)
self._project_id = project_id
self._model = model
self._operation = operation
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
def execute(self, context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
if self._operation == 'create':
return hook.create_model(project_id=self._project_id, model=self._model)
elif self._operation == 'get':
return hook.get_model(project_id=self._project_id, model_name=self._model['name'])
else:
raise ValueError('Unknown operation: {}'.format(self._operation))
class MLEngineCreateModelOperator(BaseOperator):
"""
Creates a new model.
The model should be provided by the `model` parameter.
:param model: A dictionary containing the information about the model.
:type model: dict
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model',
]
@apply_defaults
def __init__(self,
model: dict,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model = model
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
def execute(self, context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
return hook.create_model(project_id=self._project_id, model=self._model)
class MLEngineGetModelOperator(BaseOperator):
"""
    Gets a particular model.
    The name of the model should be specified in `model_name`.
:param model_name: The name of the model.
:type model_name: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model_name',
]
@apply_defaults
def __init__(self,
model_name: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model_name = model_name
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
def execute(self, context):
hook = MLEngineHook(gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
return hook.get_model(project_id=self._project_id, model_name=self._model_name)
class MLEngineDeleteModelOperator(BaseOperator):
"""
Deletes a model.
The model should be provided by the `model_name` parameter.
:param model_name: The name of the model.
:type model_name: str
    :param delete_contents: (Optional) Whether to force the deletion even if the model is not empty.
        Will delete all versions (if any) in the model if set to True.
The default value is False.
:type delete_contents: bool
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model_name',
]
@apply_defaults
def __init__(self,
model_name: str,
delete_contents: bool = False,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model_name = model_name
self._delete_contents = delete_contents
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
def execute(self, context):
hook = MLEngineHook(gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
return hook.delete_model(
project_id=self._project_id, model_name=self._model_name, delete_contents=self._delete_contents
)
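# A hedged usage sketch (not part of the original module): wires the three
# model operators above into a DAG. The DAG id, project id and model name
# ('example-project', 'my_model') are hypothetical placeholders, and the
# default GCP connection is assumed.
def _example_model_lifecycle():
    import datetime
    from airflow import DAG
    with DAG(dag_id='example_mlengine_model',
             start_date=datetime.datetime(2021, 1, 1),
             schedule_interval=None) as dag:
        create = MLEngineCreateModelOperator(
            task_id='create-model',
            project_id='example-project',
            model={'name': 'my_model'},
        )
        get = MLEngineGetModelOperator(
            task_id='get-model',
            project_id='example-project',
            model_name='my_model',
        )
        delete = MLEngineDeleteModelOperator(
            task_id='delete-model',
            project_id='example-project',
            model_name='my_model',
            delete_contents=True,  # also drop any versions still attached
        )
        create >> get >> delete
    return dag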
class MLEngineManageVersionOperator(BaseOperator):
"""
Operator for managing a Google Cloud ML Engine version.
.. warning::
This operator is deprecated. Consider using operators for specific operations:
MLEngineCreateVersionOperator, MLEngineSetDefaultVersionOperator,
MLEngineListVersionsOperator, MLEngineDeleteVersionOperator.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:type model_name: str
:param version_name: A name to use for the version being operated upon.
If not None and the `version` argument is None or does not have a value for
the `name` key, then this will be populated in the payload for the
`name` key. (templated)
:type version_name: str
:param version: A dictionary containing the information about the version.
If the `operation` is `create`, `version` should contain all the
        information about this version such as name and deploymentUrl.
If the `operation` is `get` or `delete`, the `version` parameter
should contain the `name` of the version.
If it is None, the only `operation` possible would be `list`. (templated)
:type version: dict
:param operation: The operation to perform. Available operations are:
* ``create``: Creates a new version in the model specified by `model_name`,
in which case the `version` parameter should contain all the
information to create that version
(e.g. `name`, `deploymentUrl`).
        * ``set_default``: Sets a version in the model specified by `model_name` to be the default.
The name of the version should be specified in the `version`
parameter.
* ``list``: Lists all available versions of the model specified
by `model_name`.
        * ``delete``: Deletes the version specified in the `version` parameter from the
          model specified by `model_name`.
The name of the version should be specified in the `version`
parameter.
:type operation: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model_name',
'_version_name',
'_version',
]
@apply_defaults
def __init__(self,
model_name: str,
version_name: Optional[str] = None,
version: Optional[dict] = None,
operation: str = 'create',
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model_name = model_name
self._version_name = version_name
self._version = version or {}
self._operation = operation
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
warnings.warn(
"This operator is deprecated. Consider using operators for specific operations: "
"MLEngineCreateVersion, MLEngineSetDefaultVersion, MLEngineListVersions, MLEngineDeleteVersion.",
DeprecationWarning,
stacklevel=3
)
def execute(self, context):
if 'name' not in self._version:
self._version['name'] = self._version_name
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
if self._operation == 'create':
if not self._version:
raise ValueError("version attribute of {} could not "
"be empty".format(self.__class__.__name__))
return hook.create_version(
project_id=self._project_id,
model_name=self._model_name,
version_spec=self._version
)
elif self._operation == 'set_default':
return hook.set_default_version(
project_id=self._project_id,
model_name=self._model_name,
version_name=self._version['name']
)
elif self._operation == 'list':
return hook.list_versions(
project_id=self._project_id,
model_name=self._model_name
)
elif self._operation == 'delete':
return hook.delete_version(
project_id=self._project_id,
model_name=self._model_name,
version_name=self._version['name']
)
else:
raise ValueError('Unknown operation: {}'.format(self._operation))
class MLEngineCreateVersionOperator(BaseOperator):
"""
    Creates a new version in the model.
    The model should be specified by `model_name`; the `version` parameter should contain all the
    information needed to create that version.
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to. (templated)
:type model_name: str
:param version: A dictionary containing the information about the version. (templated)
:type version: dict
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model_name',
'_version',
]
@apply_defaults
def __init__(self,
model_name: str,
version: dict,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model_name = model_name
self._version = version
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
if not self._version:
raise AirflowException("The version parameter could not be empty.")
def execute(self, context):
hook = MLEngineHook(gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
return hook.create_version(
project_id=self._project_id,
model_name=self._model_name,
version_spec=self._version
)
class MLEngineSetDefaultVersionOperator(BaseOperator):
"""
    Sets the version specified by `version_name` as the default version of the model
    specified by `model_name`.
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to. (templated)
:type model_name: str
:param version_name: A name to use for the version being operated upon. (templated)
:type version_name: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model_name',
'_version_name',
]
@apply_defaults
def __init__(self,
model_name: str,
version_name: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model_name = model_name
self._version_name = version_name
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
if not self._version_name:
raise AirflowException("The version_name parameter could not be empty.")
def execute(self, context):
hook = MLEngineHook(gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
return hook.set_default_version(
project_id=self._project_id,
model_name=self._model_name,
version_name=self._version_name
)
class MLEngineListVersionsOperator(BaseOperator):
"""
    Lists all available versions of the model.
    The model should be specified by `model_name`.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:type model_name: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model_name',
]
@apply_defaults
def __init__(self,
model_name: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model_name = model_name
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
def execute(self, context):
hook = MLEngineHook(gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
return hook.list_versions(
project_id=self._project_id,
model_name=self._model_name,
)
class MLEngineDeleteVersionOperator(BaseOperator):
"""
    Deletes the version from the model.
    The name of the version should be specified in the `version_name` parameter; the model is
    specified by `model_name`.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:type model_name: str
:param version_name: A name to use for the version being operated upon. (templated)
:type version_name: str
    :param project_id: The Google Cloud project name to which MLEngine model belongs.
        If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model_name',
'_version_name',
]
@apply_defaults
def __init__(self,
model_name: str,
version_name: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._model_name = model_name
self._version_name = version_name
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
if not self._version_name:
raise AirflowException("The version_name parameter could not be empty.")
def execute(self, context):
hook = MLEngineHook(gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
return hook.delete_version(
project_id=self._project_id,
model_name=self._model_name,
version_name=self._version_name
)
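# A hedged usage sketch (not part of the original module): chains the four
# version operators above into a simple lifecycle. Project, model and version
# names are hypothetical placeholders; the 'deploymentUrl' key follows the
# docstring wording above.
def _example_version_lifecycle():
    import datetime
    from airflow import DAG
    with DAG(dag_id='example_mlengine_versions',
             start_date=datetime.datetime(2021, 1, 1),
             schedule_interval=None) as dag:
        create = MLEngineCreateVersionOperator(
            task_id='create-version',
            project_id='example-project',
            model_name='my_model',
            version={'name': 'v1', 'deploymentUrl': 'gs://example-bucket/model/'},
        )
        set_default = MLEngineSetDefaultVersionOperator(
            task_id='set-default-version',
            project_id='example-project',
            model_name='my_model',
            version_name='v1',
        )
        list_versions = MLEngineListVersionsOperator(
            task_id='list-versions',
            project_id='example-project',
            model_name='my_model',
        )
        delete = MLEngineDeleteVersionOperator(
            task_id='delete-version',
            project_id='example-project',
            model_name='my_model',
            version_name='v1',
        )
        create >> set_default >> list_versions >> delete
    return dag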
class AIPlatformConsoleLink(BaseOperatorLink):
"""
Helper class for constructing AI Platform Console link.
"""
name = "AI Platform Console"
def get_link(self, operator, dttm):
task_instance = TaskInstance(task=operator, execution_date=dttm)
gcp_metadata_dict = task_instance.xcom_pull(task_ids=operator.task_id, key="gcp_metadata")
if not gcp_metadata_dict:
return ''
job_id = gcp_metadata_dict['job_id']
project_id = gcp_metadata_dict['project_id']
console_link = f"https://console.cloud.google.com/ai-platform/jobs/{job_id}?project={project_id}"
return console_link
class MLEngineStartTrainingJobOperator(BaseOperator):
"""
    Operator for launching an MLEngine training job.
:param job_id: A unique templated id for the submitted Google MLEngine
training job. (templated)
:type job_id: str
:param package_uris: A list of package locations for MLEngine training job,
which should include the main training program + any additional
dependencies. (templated)
:type package_uris: List[str]
:param training_python_module: The Python module name to run within MLEngine
training job after installing 'package_uris' packages. (templated)
:type training_python_module: str
:param training_args: A list of templated command line arguments to pass to
the MLEngine training program. (templated)
:type training_args: List[str]
:param region: The Google Compute Engine region to run the MLEngine training
job in (templated).
:type region: str
:param scale_tier: Resource tier for MLEngine training job. (templated)
:type scale_tier: str
:param master_type: Cloud ML Engine machine name.
Must be set when scale_tier is CUSTOM. (templated)
:type master_type: str
:param runtime_version: The Google Cloud ML runtime version to use for
training. (templated)
:type runtime_version: str
:param python_version: The version of Python used in training. (templated)
:type python_version: str
:param job_dir: A Google Cloud Storage path in which to store training
outputs and other data needed for training. (templated)
:type job_dir: str
:param project_id: The Google Cloud project name within which MLEngine training job should run.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
    :param mode: Can be one of 'DRY_RUN'/'CLOUD'. In 'DRY_RUN' mode, no real
        training job will be launched, but the MLEngine training job request
        will be printed out. In any other mode (including 'CLOUD' and the
        default 'PRODUCTION'), a real MLEngine training job creation request
        will be issued.
:type mode: str
"""
template_fields = [
'_project_id',
'_job_id',
'_package_uris',
'_training_python_module',
'_training_args',
'_region',
'_scale_tier',
'_master_type',
'_runtime_version',
'_python_version',
'_job_dir'
]
operator_extra_links = (
AIPlatformConsoleLink(),
)
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
job_id: str,
package_uris: List[str],
training_python_module: str,
training_args: List[str],
region: str,
scale_tier: Optional[str] = None,
master_type: Optional[str] = None,
runtime_version: Optional[str] = None,
python_version: Optional[str] = None,
job_dir: Optional[str] = None,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
mode: str = 'PRODUCTION',
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._job_id = job_id
self._package_uris = package_uris
self._training_python_module = training_python_module
self._training_args = training_args
self._region = region
self._scale_tier = scale_tier
self._master_type = master_type
self._runtime_version = runtime_version
self._python_version = python_version
self._job_dir = job_dir
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
self._mode = mode
if not self._project_id:
raise AirflowException('Google Cloud project id is required.')
if not self._job_id:
raise AirflowException(
                'A unique job id is required for Google MLEngine training '
'job.')
if not package_uris:
raise AirflowException(
'At least one python package is required for MLEngine '
'Training job.')
if not training_python_module:
raise AirflowException(
'Python module name to run after installing required '
'packages is required.')
if not self._region:
raise AirflowException('Google Compute Engine region is required.')
if self._scale_tier is not None and self._scale_tier.upper() == "CUSTOM" and not self._master_type:
raise AirflowException(
'master_type must be set when scale_tier is CUSTOM')
def execute(self, context):
job_id = _normalize_mlengine_job_id(self._job_id)
training_request = {
'jobId': job_id,
'trainingInput': {
'scaleTier': self._scale_tier,
'packageUris': self._package_uris,
'pythonModule': self._training_python_module,
'region': self._region,
'args': self._training_args,
}
}
if self._runtime_version:
training_request['trainingInput']['runtimeVersion'] = self._runtime_version
if self._python_version:
training_request['trainingInput']['pythonVersion'] = self._python_version
if self._job_dir:
training_request['trainingInput']['jobDir'] = self._job_dir
if self._scale_tier is not None and self._scale_tier.upper() == "CUSTOM":
training_request['trainingInput']['masterType'] = self._master_type
if self._mode == 'DRY_RUN':
self.log.info('In dry_run mode.')
self.log.info('MLEngine Training job request is: %s', training_request)
return
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id, delegate_to=self._delegate_to)
# Helper method to check if the existing job's training input is the
# same as the request we get here.
def check_existing_job(existing_job):
existing_training_input = existing_job.get('trainingInput', None)
requested_training_input = training_request['trainingInput']
if 'scaleTier' not in existing_training_input:
existing_training_input['scaleTier'] = None
existing_training_input['args'] = existing_training_input.get('args', None)
requested_training_input["args"] = requested_training_input['args'] \
if requested_training_input["args"] else None
return existing_training_input == requested_training_input
finished_training_job = hook.create_job(
project_id=self._project_id, job=training_request, use_existing_job_fn=check_existing_job
)
if finished_training_job['state'] != 'SUCCEEDED':
self.log.error('MLEngine training job failed: %s', str(finished_training_job))
raise RuntimeError(finished_training_job['errorMessage'])
gcp_metadata = {
"job_id": job_id,
"project_id": self._project_id,
}
context['task_instance'].xcom_push("gcp_metadata", gcp_metadata)
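# A hedged usage sketch (not part of the original module): a DRY_RUN training
# job, so only the request payload is logged and nothing is submitted. All
# values ('example-project', the GCS URIs, the module name) are hypothetical.
def _example_training_job():
    return MLEngineStartTrainingJobOperator(
        task_id='train-model',
        project_id='example-project',
        job_id='train_{{ ds_nodash }}',
        package_uris=['gs://example-bucket/trainer-0.1.tar.gz'],
        training_python_module='trainer.task',
        training_args=['--epochs', '10'],
        region='us-central1',
        scale_tier='BASIC',
        mode='DRY_RUN',
    )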
class MLEngineTrainingCancelJobOperator(BaseOperator):
"""
    Operator for cleaning up a failed MLEngine training job.
:param job_id: A unique templated id for the submitted Google MLEngine
training job. (templated)
:type job_id: str
:param project_id: The Google Cloud project name within which MLEngine training job should run.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_job_id',
]
@apply_defaults
def __init__(self,
job_id: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._job_id = job_id
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
if not self._project_id:
raise AirflowException('Google Cloud project id is required.')
def execute(self, context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
delegate_to=self._delegate_to
)
hook.cancel_job(project_id=self._project_id, job_id=_normalize_mlengine_job_id(self._job_id))
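# A hedged usage sketch (not part of the original module): cancel the job
# submitted by the training sketch above, e.g. as a clean-up task. The
# trigger_rule value is an assumption about the intended use, not a requirement.
def _example_cancel_training_job():
    return MLEngineTrainingCancelJobOperator(
        task_id='cancel-training',
        project_id='example-project',
        job_id='train_{{ ds_nodash }}',
        trigger_rule='one_failed',  # only run when an upstream task failed
    )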
|
|
import unittest
import autocomplete_light.shortcuts as autocomplete_light
import django
from django.contrib.auth.models import User
from django.db import models
from django.test import TestCase
class Noname(models.Model):
number = models.CharField(max_length=100)
class Foo(models.Model):
name = models.CharField(max_length=100)
class Bar(autocomplete_light.AutocompleteModelBase):
pass
class Generic(autocomplete_light.AutocompleteGenericBase):
choices = (
User.objects.all(),
)
search_fields = (
('username',),
)
class RegistryTestCase(TestCase):
def setUp(self):
self.registry = autocomplete_light.AutocompleteRegistry()
def test_register_model(self):
self.registry.register(Foo)
self.assertIn('FooAutocomplete', self.registry.keys())
def test_register_model_and_autocomplete(self):
self.registry.register(Foo, Bar)
self.assertIn('FooBar', self.registry.keys())
def test_register_autocomplete(self):
self.registry.register(Bar)
self.assertIn('Bar', self.registry.keys())
def test_unregister(self):
self.registry.register(Bar)
self.registry.unregister('Bar')
self.assertEqual(list(self.registry.keys()), [])
def test_register_with_kwargs(self):
choices = ['foo']
self.registry.register(Foo, search_name='search_name', choices=choices)
self.assertEqual(self.registry['FooAutocomplete'].search_name,
'search_name')
self.assertEqual(self.registry['FooAutocomplete'].choices, choices)
def test_register_with_custom_autocomplete_model_base(self):
class NewBase(autocomplete_light.AutocompleteModelBase):
new_base = True
self.registry.autocomplete_model_base = NewBase
self.registry.register(Foo)
self.assertEqual(NewBase, self.registry['FooAutocomplete'].__base__)
self.assertTrue(self.registry['FooAutocomplete'].new_base)
def test_register_with_autocomplete_and_kwargs(self):
self.registry.register(Foo, Bar, search_name='search_name')
self.assertEqual(self.registry['FooBar'].search_name,
'search_name')
def test_register_with_custom_name(self):
self.registry.register(Foo, Bar, name='BarFoo')
self.assertIn('BarFoo', self.registry.keys())
self.assertEqual(self.registry['BarFoo'].__name__, 'BarFoo')
    def test_register_no_name_fail(self):
        # Note: a bare ``except: pass`` here would also swallow the
        # AssertionError raised by self.fail(), so the test could never fail.
        with self.assertRaises(Exception):
            self.registry.register(Noname)
def test_register_no_name_pass(self):
self.registry.register(Noname, search_fields=('number',))
def test_register_generic_with_custom_name(self):
self.registry.register(Generic, name='foo')
self.assertTrue('foo' in self.registry.keys())
def test_raise_AutocompleteNotRegistered(self):
try:
self.registry['NotRegistered']
self.fail('Should raise AutocompleteNotRegistered')
except autocomplete_light.AutocompleteNotRegistered:
pass
def test_raise_NoGenericAutocompleteRegistered(self):
self.assertRaises(autocomplete_light.NoGenericAutocompleteRegistered,
self.registry.autocomplete_for_generic)
def test_autocomplete_for_model(self):
class FirstAutocomplete(autocomplete_light.AutocompleteModelBase):
pass
class SecondAutocomplete(autocomplete_light.AutocompleteModelBase):
pass
self.registry.register(Foo, FirstAutocomplete)
self.registry.register(Foo, SecondAutocomplete)
self.assertTrue(issubclass(
self.registry.autocomplete_for_model(Foo), FirstAutocomplete))
def test_autocomplete_for_generic(self):
class FirstAutocomplete(Generic):
pass
class SecondAutocomplete(Generic):
pass
self.registry.register(FirstAutocomplete)
self.registry.register(SecondAutocomplete)
self.assertTrue(issubclass(
self.registry.autocomplete_for_generic(), FirstAutocomplete))
class RegistryGetAutocompleteFromArgTestCase(TestCase):
def setUp(self):
self.registry = autocomplete_light.AutocompleteRegistry()
self.registry.register(Foo)
self.registry.register(Generic)
def test_from_string(self):
a = self.registry.get_autocomplete_from_arg('FooAutocomplete')
self.assertEqual(a.model, Foo)
def test_from_model(self):
a = self.registry.get_autocomplete_from_arg(Foo)
self.assertEqual(a.model, Foo)
def test_from_model_instance(self):
a = self.registry.get_autocomplete_from_arg(Foo())
self.assertEqual(a.model, Foo)
def test_from_autocomplete_instance(self):
a = self.registry.get_autocomplete_from_arg(Generic)
self.assertEqual(a, Generic)
def test_default_generic(self):
a = self.registry.get_autocomplete_from_arg()
self.assertTrue(issubclass(a, Generic))
def test_model_picked_up_from_autocomplete_class_model(self):
# GitHub issue #313
class TestModel(models.Model):
name = models.CharField(max_length=100)
class XAutocomplete(autocomplete_light.AutocompleteModelBase):
model = TestModel
self.registry.register(XAutocomplete)
result = self.registry.get_autocomplete_from_arg(TestModel)
assert result
assert issubclass(result, XAutocomplete)
def test_model_picked_up_from_autocomplete_class_choices_model(self):
class TestModel(models.Model):
name = models.CharField(max_length=100)
class YAutocomplete(autocomplete_light.AutocompleteModelBase):
choices = TestModel.objects.all()
self.registry.register(YAutocomplete)
result = self.registry.get_autocomplete_from_arg(TestModel)
assert result
assert issubclass(result, YAutocomplete)
def test_registering_autocomplete_without_model_name_as_prefix(self):
class TestModel(models.Model):
name = models.CharField(max_length=100)
class Base(autocomplete_light.AutocompleteModelBase):
pass
class BarAutocomplete(Base):
model = TestModel
choices = TestModel.objects.all()
self.registry.register(BarAutocomplete)
assert 'BarAutocomplete' in self.registry
result = self.registry.get_autocomplete_from_arg(TestModel)
assert result
assert issubclass(result, BarAutocomplete)
@unittest.skipIf(django.VERSION < (1, 7), 'requires django 1.7')
class AppConfigSupportTestCase(TestCase):
def test_appconfig_with_registry_file(self):
self.assertIsInstance(autocomplete_light.registry['AppConfigWithRegistryAutocomplete'](),
autocomplete_light.AutocompleteListBase)
def test_appconfig_without_registry_file(self):
self.assertIsInstance(autocomplete_light.registry['AppConfigWithoutRegistryAutocomplete'](),
autocomplete_light.AutocompleteListBase)
|
|
# -*- coding: utf-8 -*-
# (c) 2009-2022 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Implements two property managers: one in-memory (dict-based), and one
persistent, low-performance variant using shelve.
The properties dictionary is built like::
{ ref-url1: {propname1: value1,
propname2: value2,
},
ref-url2: {propname1: value1,
propname2: value2,
},
}
"""
import os
import shelve
from wsgidav import util
from wsgidav.rw_lock import ReadWriteLock
# TODO: comments from Ian Bicking (2005)
# @@: Use of shelve means this is only really useful in a threaded environment.
# And if you have just a single-process threaded environment, you could get
# nearly the same effect with a dictionary of threading.Lock() objects. Of course,
# it would be better to move off shelve anyway, probably to a system with
# a directory of per-file locks, using the file locking primitives (which,
# sadly, are not quite portable).
# @@: It would probably be easy to store the properties as pickle objects
# in a parallel directory structure to the files you are describing.
# Pickle is expedient, but later you could use something more readable
# (pickles aren't particularly readable)
__docformat__ = "reStructuredText"
_logger = util.get_module_logger("wsgidav.prop_man")
# ========================================================================
# PropertyManager
# ========================================================================
class PropertyManager:
"""
An in-memory property manager implementation using a dictionary.
This is obviously not persistent, but should be enough in some cases.
For a persistent implementation, see property_manager.ShelvePropertyManager().
"""
def __init__(self):
self._dict = None
self._loaded = False
self._lock = ReadWriteLock()
self._verbose = 3
def __repr__(self):
return "PropertyManager"
def __del__(self):
if __debug__ and self._verbose >= 4:
self._check()
self._close()
def _lazy_open(self):
_logger.debug("_lazy_open()")
self._lock.acquire_write()
try:
self._dict = {}
self._loaded = True
finally:
self._lock.release()
def _sync(self):
pass
def _close(self):
_logger.debug("_close()")
self._lock.acquire_write()
try:
self._dict = None
self._loaded = False
finally:
self._lock.release()
def _check(self, msg=""):
try:
if not self._loaded:
return True
for k, v in self._dict.items():
_dummy = "{}, {}".format(k, v) # noqa
# _logger.debug("{} checks ok {}".format(self.__class__.__name__, msg))
return True
except Exception:
_logger.exception(
"{} _check: ERROR {}".format(self.__class__.__name__, msg)
)
return False
def _dump(self, msg=""):
_logger.info("{}({}): {}".format(self.__class__.__name__, self.__repr__(), msg))
        if not self._loaded:
            self._lazy_open()
            if self._verbose >= 4:
                return  # Already dumped in _lazy_open
try:
for k, v in self._dict.items():
_logger.info(" {}".format(k))
for k2, v2 in v.items():
try:
_logger.info(" {}: '{}'".format(k2, v2))
except Exception as e:
_logger.info(" {}: ERROR {}".format(k2, e))
# _logger.flush()
except Exception as e:
_logger.error("PropertyManager._dump() ERROR: {}".format(e))
def get_properties(self, norm_url, environ=None):
_logger.debug("get_properties({})".format(norm_url))
self._lock.acquire_read()
try:
if not self._loaded:
self._lazy_open()
returnlist = []
if norm_url in self._dict:
for propdata in self._dict[norm_url].keys():
returnlist.append(propdata)
return returnlist
finally:
self._lock.release()
def get_property(self, norm_url, name, environ=None):
_logger.debug("get_property({}, {})".format(norm_url, name))
self._lock.acquire_read()
try:
if not self._loaded:
self._lazy_open()
if norm_url not in self._dict:
return None
# TODO: sometimes we get exceptions here: (catch or otherwise make
# more robust?)
try:
resourceprops = self._dict[norm_url]
except Exception as e:
_logger.exception(
"get_property({}, {}) failed : {}".format(norm_url, name, e)
)
raise
return resourceprops.get(name)
finally:
self._lock.release()
def write_property(
self, norm_url, name, property_value, dry_run=False, environ=None
):
assert norm_url and norm_url.startswith("/")
assert name # and name.startswith("{")
assert property_value is not None
_logger.debug(
"write_property({}, {}, dry_run={}):\n\t{}".format(
norm_url, name, dry_run, property_value
)
)
if dry_run:
return # TODO: can we check anything here?
self._lock.acquire_write()
try:
if not self._loaded:
self._lazy_open()
if norm_url in self._dict:
locatordict = self._dict[norm_url]
else:
locatordict = {} # dict([])
locatordict[name] = property_value
# This re-assignment is important, so Shelve realizes the change:
self._dict[norm_url] = locatordict
self._sync()
if __debug__ and self._verbose >= 4:
self._check()
finally:
self._lock.release()
def remove_property(self, norm_url, name, dry_run=False, environ=None):
"""
Specifying the removal of a property that does not exist is NOT an error.
"""
_logger.debug(
"remove_property({}, {}, dry_run={})".format(norm_url, name, dry_run)
)
if dry_run:
# TODO: can we check anything here?
return
self._lock.acquire_write()
try:
if not self._loaded:
self._lazy_open()
if norm_url in self._dict:
locatordict = self._dict[norm_url]
if name in locatordict:
del locatordict[name]
# This re-assignment is important, so Shelve realizes the
# change:
self._dict[norm_url] = locatordict
self._sync()
if __debug__ and self._verbose >= 4:
self._check()
finally:
self._lock.release()
def remove_properties(self, norm_url, environ=None):
_logger.debug("remove_properties({})".format(norm_url))
self._lock.acquire_write()
try:
if not self._loaded:
self._lazy_open()
if norm_url in self._dict:
del self._dict[norm_url]
self._sync()
finally:
self._lock.release()
def copy_properties(self, src_url, dest_url, environ=None):
_logger.debug("copy_properties({}, {})".format(src_url, dest_url))
self._lock.acquire_write()
try:
if __debug__ and self._verbose >= 4:
self._check()
if not self._loaded:
self._lazy_open()
if src_url in self._dict:
self._dict[dest_url] = self._dict[src_url].copy()
self._sync()
if __debug__ and self._verbose >= 4:
self._check("after copy")
finally:
self._lock.release()
def move_properties(self, src_url, dest_url, with_children, environ=None):
_logger.debug(
"move_properties({}, {}, {})".format(src_url, dest_url, with_children)
)
self._lock.acquire_write()
try:
if __debug__ and self._verbose >= 4:
self._check()
if not self._loaded:
self._lazy_open()
if with_children:
# Move src_url\*
for url in list(self._dict.keys()):
if util.is_equal_or_child_uri(src_url, url):
d = url.replace(src_url, dest_url)
self._dict[d] = self._dict[url]
del self._dict[url]
elif src_url in self._dict:
# Move src_url only
self._dict[dest_url] = self._dict[src_url]
del self._dict[src_url]
self._sync()
if __debug__ and self._verbose >= 4:
self._check("after move")
finally:
self._lock.release()
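# A hedged usage sketch (not part of the original module): store, read and
# remove a dead property for a hypothetical resource URL.
def _example_property_manager():
    pm = PropertyManager()
    pm.write_property('/docs/readme.txt', '{DAV:}displayname', 'Readme')
    assert pm.get_properties('/docs/readme.txt') == ['{DAV:}displayname']
    assert pm.get_property('/docs/readme.txt', '{DAV:}displayname') == 'Readme'
    pm.remove_properties('/docs/readme.txt')
    assert pm.get_properties('/docs/readme.txt') == []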
# ========================================================================
# ShelvePropertyManager
# ========================================================================
class ShelvePropertyManager(PropertyManager):
"""
    A persistent, low-performance property manager implementation using shelve.
"""
def __init__(self, storage_path):
self._storage_path = os.path.abspath(storage_path)
super().__init__()
def __repr__(self):
return "ShelvePropertyManager({})".format(self._storage_path)
def _lazy_open(self):
_logger.debug("_lazy_open({})".format(self._storage_path))
self._lock.acquire_write()
try:
# Test again within the critical section
if self._loaded:
return True
# Open with writeback=False, which is faster, but we have to be
# careful to re-assign values to _dict after modifying them
self._dict = shelve.open(self._storage_path, writeback=False)
self._loaded = True
if __debug__ and self._verbose >= 4:
self._check("After shelve.open()")
self._dump("After shelve.open()")
finally:
self._lock.release()
def _sync(self):
"""Write persistent dictionary to disc."""
_logger.debug("_sync()")
self._lock.acquire_write() # TODO: read access is enough?
try:
if self._loaded:
self._dict.sync()
finally:
self._lock.release()
def _close(self):
_logger.debug("_close()")
self._lock.acquire_write()
try:
if self._loaded:
self._dict.close()
self._dict = None
self._loaded = False
finally:
self._lock.release()
def clear(self):
"""Delete all entries."""
self._lock.acquire_write()
try:
            was_closed = self._dict is None
            if was_closed:
                # open()/close() are not defined on this class; use the
                # lazy-open/close helpers (the write lock is reentrant).
                self._lazy_open()
            if len(self._dict):
                self._dict.clear()
                self._dict.sync()
            if was_closed:
                self._close()
finally:
self._lock.release()
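# A hedged usage sketch (not part of the original module): same API as
# PropertyManager, but values survive a close/reopen cycle because they are
# persisted via shelve. The storage path is a hypothetical placeholder.
def _example_shelve_property_manager(storage_path='/tmp/wsgidav-props.shelve'):
    pm = ShelvePropertyManager(storage_path)
    pm.write_property('/docs/readme.txt', '{DAV:}displayname', 'Readme')
    pm._close()
    pm2 = ShelvePropertyManager(storage_path)
    assert pm2.get_property('/docs/readme.txt', '{DAV:}displayname') == 'Readme'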
|
|
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen, HTTPError
from ceph_deploy import exc
import logging
import re
import socket
from ceph_deploy.lib import remoto
LOG = logging.getLogger(__name__)
# TODO: at some point, it might be way more accurate to do this in the actual
# host where we need to get IPs from. SaltStack does this by calling `ip` and
# parsing the output, which is probably the one true way of dealing with it.
def get_nonlocal_ip(host, subnet=None):
"""
Search result of getaddrinfo() for a non-localhost-net address
"""
try:
ailist = socket.getaddrinfo(host, None)
except socket.gaierror:
raise exc.UnableToResolveError(host)
for ai in ailist:
# an ai is a 5-tuple; the last element is (ip, port)
ip = ai[4][0]
if subnet and ip_in_subnet(ip, subnet):
LOG.info('found ip (%s) for host (%s) to be in cluster subnet (%s)' % (
ip,
host,
subnet,)
)
return ip
if not ip.startswith('127.'):
if subnet:
LOG.warning('could not match ip (%s) for host (%s) for cluster subnet (%s)' % (
ip,
host,
subnet,)
)
return ip
raise exc.UnableToResolveError(host)
def ip_in_subnet(ip, subnet):
"""Does IP exists in a given subnet utility. Returns a boolean"""
ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16)
netstr, bits = subnet.split('/')
netaddr = int(''.join(['%02x' % int(x) for x in netstr.split('.')]), 16)
mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
return (ipaddr & mask) == (netaddr & mask)
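# A worked example (a sketch, not part of the original module) of the mask
# arithmetic above: 192.168.1.20 packs to 0xc0a80114, the /24 mask is
# 0xffffff00, and 0xc0a80114 & 0xffffff00 == 0xc0a80100, which equals the
# masked network address of 192.168.1.0.
def _example_ip_in_subnet():
    assert ip_in_subnet('192.168.1.20', '192.168.1.0/24') is True
    assert ip_in_subnet('192.168.2.20', '192.168.1.0/24') is False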
def in_subnet(cidr, addrs=None):
    """
    Returns True if any of the given addresses is within the specified subnet,
    otherwise False
    """
    for address in addrs or []:
        if ip_in_subnet(address, cidr):
            return True
    return False
def ip_addresses(conn, interface=None, include_loopback=False):
"""
Returns a list of IPv4/IPv6 addresses assigned to the host. 127.0.0.1/::1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
Example output looks like::
        >>> ip_addresses(conn)
        ['192.168.1.111', '10.0.1.12', '2001:db8::100']
"""
ret = set()
ifaces = linux_interfaces(conn)
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict((k, v) for k, v in ifaces.items()
if k == interface)
if not target_ifaces:
LOG.error('Interface {0} not found.'.format(interface))
for info in target_ifaces.values():
for ipv4 in info.get('inet', []):
loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) or ipv4.get('label') == 'lo'
if not loopback or include_loopback:
ret.add(ipv4['address'])
for secondary in info.get('secondary', []):
addr = secondary.get('address')
if addr and secondary.get('type') == 'inet':
                if include_loopback or not in_subnet('127.0.0.0/8', [addr]):
ret.add(addr)
for ipv6 in info.get('inet6', []):
# When switching to Python 3 the IPAddress module can do all this work for us
if ipv6.get('address').startswith('fe80::'):
continue
if not include_loopback and '::1' == ipv6.get('address'):
continue
ret.add(ipv6['address'])
if ret:
conn.logger.debug('IP addresses found: %s' % str(list(ret)))
return sorted(list(ret))
def linux_interfaces(conn):
"""
    Obtain interface information for *NIX/BSD variants on remote servers.
Example output from a remote node with a couple of interfaces::
{'eth0': {'hwaddr': '08:00:27:08:c2:e4',
'inet': [{'address': '10.0.2.15',
'broadcast': '10.0.2.255',
'label': 'eth0',
'netmask': '255.255.255.0'}],
'inet6': [{'address': 'fe80::a00:27ff:fe08:c2e4',
'prefixlen': '64'}],
'up': True},
'eth1': {'hwaddr': '08:00:27:70:06:f1',
'inet': [{'address': '192.168.111.101',
'broadcast': '192.168.111.255',
'label': 'eth1',
'netmask': '255.255.255.0'}],
'inet6': [{'address': 'fe80::a00:27ff:fe70:6f1',
'prefixlen': '64'}],
'up': True},
'lo': {'hwaddr': '00:00:00:00:00:00',
'inet': [{'address': '127.0.0.1',
'broadcast': None,
'label': 'lo',
'netmask': '255.0.0.0'}],
'inet6': [{'address': '::1', 'prefixlen': '128'}],
'up': True}}
:param conn: A connection object to a remote node
"""
ifaces = dict()
ip_path = conn.remote_module.which('ip')
ifconfig_path = None if ip_path else conn.remote_module.which('ifconfig')
if ip_path:
cmd1, _, _ = remoto.process.check(
conn,
[
'{0}'.format(ip_path),
'link',
'show',
],
)
cmd2, _, _ = remoto.process.check(
conn,
[
'{0}'.format(ip_path),
'addr',
'show',
],
)
ifaces = _interfaces_ip('\n'.join(cmd1) + '\n' +
'\n'.join(cmd2))
elif ifconfig_path:
cmd, _, _ = remoto.process.check(
conn,
[
'{0}'.format(ifconfig_path),
'-a',
]
)
ifaces = _interfaces_ifconfig('\n'.join(cmd))
return ifaces
def _interfaces_ip(out):
"""
Uses ip to return a dictionary of interfaces with various information about
each (up/down state, ip address, netmask, and hwaddr)
"""
ret = dict()
def parse_network(value, cols):
"""
Return a tuple of ip, netmask, broadcast
based on the current set of cols
"""
brd = None
if '/' in value: # we have a CIDR in this address
ip, cidr = value.split('/') # pylint: disable=C0103
else:
ip = value # pylint: disable=C0103
cidr = 32
if type_ == 'inet':
mask = cidr_to_ipv4_netmask(int(cidr))
if 'brd' in cols:
brd = cols[cols.index('brd') + 1]
elif type_ == 'inet6':
mask = cidr
return (ip, mask, brd)
groups = re.compile('\r?\n\\d').split(out)
for group in groups:
iface = None
data = dict()
for line in group.splitlines():
if ' ' not in line:
continue
match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line)
if match:
iface, parent, attrs = match.groups()
if 'UP' in attrs.split(','):
data['up'] = True
else:
data['up'] = False
if parent:
data['parent'] = parent
continue
cols = line.split()
if len(cols) >= 2:
type_, value = tuple(cols[0:2])
iflabel = cols[-1:][0]
if type_ in ('inet', 'inet6'):
if 'secondary' not in cols:
ipaddr, netmask, broadcast = parse_network(value, cols)
if type_ == 'inet':
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['netmask'] = netmask
addr_obj['broadcast'] = broadcast
addr_obj['label'] = iflabel
data['inet'].append(addr_obj)
elif type_ == 'inet6':
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['prefixlen'] = netmask
data['inet6'].append(addr_obj)
else:
if 'secondary' not in data:
data['secondary'] = list()
ip_, mask, brd = parse_network(value, cols)
data['secondary'].append({
'type': type_,
'address': ip_,
'netmask': mask,
'broadcast': brd,
'label': iflabel,
})
del ip_, mask, brd
elif type_.startswith('link'):
data['hwaddr'] = value
if iface:
ret[iface] = data
del iface, data
return ret
def _interfaces_ifconfig(out):
"""
Uses ifconfig to return a dictionary of interfaces with various information
about each (up/down state, ip address, netmask, and hwaddr)
"""
ret = dict()
piface = re.compile(r'^([^\s:]+)')
pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)')
pip = re.compile(r'.*?(?:inet addr:|inet )(.*?)\s')
pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))')
pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*')
pupdown = re.compile('UP')
pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)')
groups = re.compile('\r?\n(?=\\S)').split(out)
for group in groups:
data = dict()
iface = ''
updown = False
for line in group.splitlines():
miface = piface.match(line)
mmac = pmac.match(line)
mip = pip.match(line)
mip6 = pip6.match(line)
mupdown = pupdown.search(line)
if miface:
iface = miface.group(1)
if mmac:
data['hwaddr'] = mmac.group(1)
if mip:
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = mip.group(1)
mmask = pmask.match(line)
if mmask:
if mmask.group(1):
mmask = _number_of_set_bits_to_ipv4_netmask(
int(mmask.group(1), 16))
else:
mmask = mmask.group(2)
addr_obj['netmask'] = mmask
mbcast = pbcast.match(line)
if mbcast:
addr_obj['broadcast'] = mbcast.group(1)
data['inet'].append(addr_obj)
if mupdown:
updown = True
if mip6:
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = mip6.group(1) or mip6.group(2)
mmask6 = pmask6.match(line)
if mmask6:
addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
data['inet6'].append(addr_obj)
data['up'] = updown
ret[iface] = data
del data
return ret
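# A hedged sketch (not part of the original module): parse a minimal
# Linux-style ifconfig block to show the dictionary shape produced above.
def _example_interfaces_ifconfig():
    out = (
        'eth0      Link encap:Ethernet  HWaddr 08:00:27:08:c2:e4\n'
        '          inet addr:10.0.2.15  Bcast:10.0.2.255  Mask:255.255.255.0\n'
        '          UP BROADCAST RUNNING MULTICAST\n'
    )
    ifaces = _interfaces_ifconfig(out)
    assert ifaces['eth0']['up'] is True
    assert ifaces['eth0']['inet'][0]['address'] == '10.0.2.15'
    assert ifaces['eth0']['inet'][0]['netmask'] == '255.255.255.0'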
def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103
"""
Returns an IPv4 netmask from the integer representation of that mask.
Ex. 0xffffff00 -> '255.255.255.0'
"""
return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))
def _number_of_set_bits(x):
"""
Returns the number of bits that are set in a 32bit int
"""
# Taken from http://stackoverflow.com/a/4912729. Many thanks!
x -= (x >> 1) & 0x55555555
x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
x = ((x >> 4) + x) & 0x0f0f0f0f
x += x >> 8
x += x >> 16
return x & 0x0000003f
def cidr_to_ipv4_netmask(cidr_bits):
"""
Returns an IPv4 netmask
"""
try:
cidr_bits = int(cidr_bits)
if not 1 <= cidr_bits <= 32:
return ''
except ValueError:
return ''
netmask = ''
for idx in range(4):
if idx:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
cidr_bits = 0
return netmask
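# Worked examples (a sketch, not part of the original module) for the two
# netmask helpers above.
def _example_netmask_helpers():
    assert cidr_to_ipv4_netmask(24) == '255.255.255.0'
    assert cidr_to_ipv4_netmask(20) == '255.255.240.0'  # 256 - 2**(8 - 4) = 240
    assert cidr_to_ipv4_netmask(33) == ''  # out of range yields an empty string
    # 0xffffff00 has 24 bits set, so it round-trips to the /24 mask:
    assert _number_of_set_bits_to_ipv4_netmask(0xffffff00) == '255.255.255.0'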
def get_request(url):
try:
return urlopen(url)
except HTTPError as err:
LOG.error('repository might not be available yet')
raise RuntimeError('%s, failed to fetch %s' % (err, url))
def get_chacra_repo(shaman_url):
"""
From a Shaman URL, get the chacra url for a repository, read the
contents that point to the repo and return it as a string.
"""
shaman_response = get_request(shaman_url)
chacra_url = shaman_response.geturl()
chacra_response = get_request(chacra_url)
return chacra_response.read()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Base AWS Hook.
.. seealso::
For more information on how to use this hook, take a look at the guide:
:ref:`howto/connection:AWSHook`
"""
import configparser
import datetime
import logging
import warnings
from functools import wraps
from typing import Any, Callable, Dict, Optional, Tuple, Union
import boto3
import botocore
import botocore.session
import requests
import tenacity
from botocore.config import Config
from botocore.credentials import ReadOnlyCredentials
from slugify import slugify
try:
from functools import cached_property
except ImportError:
from cached_property import cached_property
from dateutil.tz import tzlocal
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models.connection import Connection
from airflow.utils.log.logging_mixin import LoggingMixin
class _SessionFactory(LoggingMixin):
def __init__(self, conn: Connection, region_name: Optional[str], config: Config) -> None:
super().__init__()
self.conn = conn
self.region_name = region_name
self.config = config
self.extra_config = self.conn.extra_dejson
self.basic_session = None
self.role_arn = None
def create_session(self) -> boto3.session.Session:
"""Create AWS session."""
session_kwargs = {}
if "session_kwargs" in self.extra_config:
self.log.info(
"Retrieving session_kwargs from Connection.extra_config['session_kwargs']: %s",
self.extra_config["session_kwargs"],
)
session_kwargs = self.extra_config["session_kwargs"]
self.basic_session = self._create_basic_session(session_kwargs=session_kwargs)
self.role_arn = self._read_role_arn_from_extra_config()
# If role_arn was specified then STS + assume_role
if self.role_arn is None:
return self.basic_session
return self._create_session_with_assume_role(session_kwargs=session_kwargs)
def _create_basic_session(self, session_kwargs: Dict[str, Any]) -> boto3.session.Session:
aws_access_key_id, aws_secret_access_key = self._read_credentials_from_connection()
aws_session_token = self.extra_config.get("aws_session_token")
region_name = self.region_name
if self.region_name is None and 'region_name' in self.extra_config:
self.log.info("Retrieving region_name from Connection.extra_config['region_name']")
region_name = self.extra_config["region_name"]
self.log.info(
"Creating session with aws_access_key_id=%s region_name=%s",
aws_access_key_id,
region_name,
)
return boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name,
aws_session_token=aws_session_token,
**session_kwargs,
)
def _create_session_with_assume_role(self, session_kwargs: Dict[str, Any]) -> boto3.session.Session:
assume_role_method = self.extra_config.get('assume_role_method', 'assume_role')
self.log.info("assume_role_method=%s", assume_role_method)
supported_methods = ['assume_role', 'assume_role_with_saml', 'assume_role_with_web_identity']
if assume_role_method not in supported_methods:
            raise NotImplementedError(
                f'assume_role_method={assume_role_method} in Connection {self.conn.conn_id} Extra. '
                f'Currently {supported_methods} are supported. '
                '(Omitting this setting will default to "assume_role".)'
            )
if assume_role_method == 'assume_role_with_web_identity':
# Deferred credentials have no initial credentials
credential_fetcher = self._get_web_identity_credential_fetcher()
credentials = botocore.credentials.DeferredRefreshableCredentials(
method='assume-role-with-web-identity',
refresh_using=credential_fetcher.fetch_credentials,
time_fetcher=lambda: datetime.datetime.now(tz=tzlocal()),
)
else:
# Refreshable credentials do have initial credentials
credentials = botocore.credentials.RefreshableCredentials.create_from_metadata(
metadata=self._refresh_credentials(),
refresh_using=self._refresh_credentials,
method="sts-assume-role",
)
session = botocore.session.get_session()
session._credentials = credentials # pylint: disable=protected-access
region_name = self.basic_session.region_name
session.set_config_variable("region", region_name)
return boto3.session.Session(botocore_session=session, **session_kwargs)
def _refresh_credentials(self) -> Dict[str, Any]:
self.log.info('Refreshing credentials')
assume_role_method = self.extra_config.get('assume_role_method', 'assume_role')
sts_session = self.basic_session
if assume_role_method == 'assume_role':
sts_client = sts_session.client("sts", config=self.config)
sts_response = self._assume_role(sts_client=sts_client)
elif assume_role_method == 'assume_role_with_saml':
sts_client = sts_session.client("sts", config=self.config)
sts_response = self._assume_role_with_saml(sts_client=sts_client)
else:
raise NotImplementedError(f'assume_role_method={assume_role_method} not expected')
        sts_response_http_status = sts_response['ResponseMetadata']['HTTPStatusCode']
        if sts_response_http_status != 200:
            raise Exception(f'sts_response_http_status={sts_response_http_status}')
credentials = sts_response['Credentials']
expiry_time = credentials.get('Expiration').isoformat()
        self.log.info('New credentials expiry_time: %s', expiry_time)
credentials = {
"access_key": credentials.get("AccessKeyId"),
"secret_key": credentials.get("SecretAccessKey"),
"token": credentials.get("SessionToken"),
"expiry_time": expiry_time,
}
return credentials
def _read_role_arn_from_extra_config(self) -> Optional[str]:
aws_account_id = self.extra_config.get("aws_account_id")
aws_iam_role = self.extra_config.get("aws_iam_role")
role_arn = self.extra_config.get("role_arn")
if role_arn is None and aws_account_id is not None and aws_iam_role is not None:
self.log.info("Constructing role_arn from aws_account_id and aws_iam_role")
role_arn = f"arn:aws:iam::{aws_account_id}:role/{aws_iam_role}"
self.log.info("role_arn is %s", role_arn)
return role_arn
def _read_credentials_from_connection(self) -> Tuple[Optional[str], Optional[str]]:
aws_access_key_id = None
aws_secret_access_key = None
if self.conn.login:
aws_access_key_id = self.conn.login
aws_secret_access_key = self.conn.password
self.log.info("Credentials retrieved from login")
elif "aws_access_key_id" in self.extra_config and "aws_secret_access_key" in self.extra_config:
aws_access_key_id = self.extra_config["aws_access_key_id"]
aws_secret_access_key = self.extra_config["aws_secret_access_key"]
self.log.info("Credentials retrieved from extra_config")
elif "s3_config_file" in self.extra_config:
aws_access_key_id, aws_secret_access_key = _parse_s3_config(
self.extra_config["s3_config_file"],
self.extra_config.get("s3_config_format"),
self.extra_config.get("profile"),
)
self.log.info("Credentials retrieved from extra_config['s3_config_file']")
else:
self.log.info("No credentials retrieved from Connection")
return aws_access_key_id, aws_secret_access_key
def _strip_invalid_session_name_characters(self, role_session_name: str) -> str:
return slugify(role_session_name, regex_pattern=r'[^\w+=,.@-]+')
def _assume_role(self, sts_client: boto3.client) -> Dict:
assume_role_kwargs = self.extra_config.get("assume_role_kwargs", {})
if "external_id" in self.extra_config: # Backwards compatibility
assume_role_kwargs["ExternalId"] = self.extra_config.get("external_id")
role_session_name = self._strip_invalid_session_name_characters(f"Airflow_{self.conn.conn_id}")
self.log.info(
"Doing sts_client.assume_role to role_arn=%s (role_session_name=%s)",
self.role_arn,
role_session_name,
)
return sts_client.assume_role(
RoleArn=self.role_arn, RoleSessionName=role_session_name, **assume_role_kwargs
)
def _assume_role_with_saml(self, sts_client: boto3.client) -> Dict[str, Any]:
saml_config = self.extra_config['assume_role_with_saml']
principal_arn = saml_config['principal_arn']
idp_auth_method = saml_config['idp_auth_method']
if idp_auth_method == 'http_spegno_auth':
saml_assertion = self._fetch_saml_assertion_using_http_spegno_auth(saml_config)
else:
            raise NotImplementedError(
                f'idp_auth_method={idp_auth_method} in Connection {self.conn.conn_id} Extra. '
                'Currently only "http_spegno_auth" is supported, and it must be specified.'
            )
self.log.info("Doing sts_client.assume_role_with_saml to role_arn=%s", self.role_arn)
assume_role_kwargs = self.extra_config.get("assume_role_kwargs", {})
return sts_client.assume_role_with_saml(
RoleArn=self.role_arn,
PrincipalArn=principal_arn,
SAMLAssertion=saml_assertion,
**assume_role_kwargs,
)
def _get_idp_response(
self, saml_config: Dict[str, Any], auth: requests.auth.AuthBase
) -> requests.models.Response:
idp_url = saml_config["idp_url"]
self.log.info("idp_url= %s", idp_url)
session = requests.Session()
# Configurable Retry when querying the IDP endpoint
if "idp_request_retry_kwargs" in saml_config:
idp_request_retry_kwargs = saml_config["idp_request_retry_kwargs"]
self.log.info("idp_request_retry_kwargs= %s", idp_request_retry_kwargs)
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
retry_strategy = Retry(**idp_request_retry_kwargs)
adapter = HTTPAdapter(max_retries=retry_strategy)
session.mount("https://", adapter)
session.mount("http://", adapter)
idp_request_kwargs = {}
if "idp_request_kwargs" in saml_config:
idp_request_kwargs = saml_config["idp_request_kwargs"]
idp_response = session.get(idp_url, auth=auth, **idp_request_kwargs)
idp_response.raise_for_status()
return idp_response
def _fetch_saml_assertion_using_http_spegno_auth(self, saml_config: Dict[str, Any]) -> str:
# requests_gssapi will need paramiko > 2.6 since you'll need
# 'gssapi' not 'python-gssapi' from PyPi.
# https://github.com/paramiko/paramiko/pull/1311
import requests_gssapi
from lxml import etree
auth = requests_gssapi.HTTPSPNEGOAuth()
if 'mutual_authentication' in saml_config:
mutual_auth = saml_config['mutual_authentication']
if mutual_auth == 'REQUIRED':
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.REQUIRED)
elif mutual_auth == 'OPTIONAL':
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.OPTIONAL)
elif mutual_auth == 'DISABLED':
auth = requests_gssapi.HTTPSPNEGOAuth(requests_gssapi.DISABLED)
            else:
                raise NotImplementedError(
                    f'mutual_authentication={mutual_auth} in Connection {self.conn.conn_id} Extra. '
                    'Currently "REQUIRED", "OPTIONAL" and "DISABLED" are supported. '
                    '(Omitting this setting will default to HTTPSPNEGOAuth().)'
                )
# Query the IDP
idp_response = self._get_idp_response(saml_config, auth=auth)
# Assist with debugging. Note: contains sensitive info!
xpath = saml_config['saml_response_xpath']
log_idp_response = 'log_idp_response' in saml_config and saml_config['log_idp_response']
if log_idp_response:
self.log.warning(
'The IDP response contains sensitive information, but log_idp_response is ON (%s).',
log_idp_response,
)
self.log.info('idp_response.content= %s', idp_response.content)
self.log.info('xpath= %s', xpath)
# Extract SAML Assertion from the returned HTML / XML
xml = etree.fromstring(idp_response.content)
saml_assertion = xml.xpath(xpath)
if isinstance(saml_assertion, list):
if len(saml_assertion) == 1:
saml_assertion = saml_assertion[0]
if not saml_assertion:
raise ValueError('Invalid SAML Assertion')
return saml_assertion
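# Illustrative only: a hypothetical Connection "extra" JSON exercising the SAML
# keys read above (the key names come from this module; every value is made up):
#
#     {
#         "assume_role_with_saml": {
#             "principal_arn": "arn:aws:iam::123456789012:saml-provider/example-idp",
#             "idp_auth_method": "http_spegno_auth",
#             "idp_url": "https://idp.example.com/saml",
#             "mutual_authentication": "REQUIRED",
#             "saml_response_xpath": "//input[@name='SAMLResponse']/@value",
#             "log_idp_response": false,
#             "idp_request_kwargs": {"verify": true},
#             "idp_request_retry_kwargs": {"total": 5, "backoff_factor": 1}
#         }
#     }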
def _get_web_identity_credential_fetcher(
self,
) -> botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher:
base_session = self.basic_session._session or botocore.session.get_session()
client_creator = base_session.create_client
federation = self.extra_config.get('assume_role_with_web_identity_federation')
if federation == 'google':
web_identity_token_loader = self._get_google_identity_token_loader()
else:
raise AirflowException(
f'Unsupported federation: {federation}. Currently only "google" is supported.'
)
assume_role_kwargs = self.extra_config.get("assume_role_kwargs", {})
return botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher(
client_creator=client_creator,
web_identity_token_loader=web_identity_token_loader,
role_arn=self.role_arn,
extra_args=assume_role_kwargs,
)
def _get_google_identity_token_loader(self):
from google.auth.transport import requests as requests_transport
from airflow.providers.google.common.utils.id_token_credentials import (
get_default_id_token_credentials,
)
audience = self.extra_config.get('assume_role_with_web_identity_federation_audience')
google_id_token_credentials = get_default_id_token_credentials(target_audience=audience)
def web_identity_token_loader():
if not google_id_token_credentials.valid:
request_adapter = requests_transport.Request()
google_id_token_credentials.refresh(request=request_adapter)
return google_id_token_credentials.token
return web_identity_token_loader
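# Illustrative only: a hypothetical Connection "extra" JSON for the web identity
# flow above (key names come from this module; values are made up, and
# DurationSeconds is just one example of an STS AssumeRole parameter):
#
#     {
#         "role_arn": "arn:aws:iam::123456789012:role/example-role",
#         "assume_role_with_web_identity_federation": "google",
#         "assume_role_with_web_identity_federation_audience": "example-audience",
#         "assume_role_kwargs": {"DurationSeconds": 3600}
#     }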
class AwsBaseHook(BaseHook):
"""
Interact with AWS.
This class is a thin wrapper around the boto3 python library.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:type verify: Union[bool, str, None]
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:type region_name: Optional[str]
:param client_type: boto3.client client_type. Eg 's3', 'emr' etc
:type client_type: Optional[str]
:param resource_type: boto3.resource resource_type. Eg 'dynamodb' etc
:type resource_type: Optional[str]
:param config: Configuration for botocore client.
(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html)
:type config: Optional[botocore.client.Config]
"""
conn_name_attr = 'aws_conn_id'
default_conn_name = 'aws_default'
conn_type = 'aws'
hook_name = 'Amazon Web Services'
def __init__(
self,
aws_conn_id: Optional[str] = default_conn_name,
verify: Union[bool, str, None] = None,
region_name: Optional[str] = None,
client_type: Optional[str] = None,
resource_type: Optional[str] = None,
config: Optional[Config] = None,
) -> None:
super().__init__()
self.aws_conn_id = aws_conn_id
self.verify = verify
self.client_type = client_type
self.resource_type = resource_type
self.region_name = region_name
self.config = config
if not (self.client_type or self.resource_type):
raise AirflowException('Either client_type or resource_type must be provided.')
def _get_credentials(self, region_name: Optional[str]) -> Tuple[boto3.session.Session, Optional[str]]:
if not self.aws_conn_id:
session = boto3.session.Session(region_name=region_name)
return session, None
self.log.info("Airflow Connection: aws_conn_id=%s", self.aws_conn_id)
try:
# Fetch the Airflow connection object
connection_object = self.get_connection(self.aws_conn_id)
extra_config = connection_object.extra_dejson
endpoint_url = extra_config.get("host")
# https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config
if "config_kwargs" in extra_config:
self.log.info(
"Retrieving config_kwargs from Connection.extra_config['config_kwargs']: %s",
extra_config["config_kwargs"],
)
self.config = Config(**extra_config["config_kwargs"])
session = _SessionFactory(
conn=connection_object, region_name=region_name, config=self.config
).create_session()
return session, endpoint_url
except AirflowException:
self.log.warning("Unable to use Airflow Connection for credentials.")
self.log.info("Fallback on boto3 credential strategy")
# http://boto3.readthedocs.io/en/latest/guide/configuration.html
self.log.info(
"Creating session using boto3 credential strategy region_name=%s",
region_name,
)
session = boto3.session.Session(region_name=region_name)
return session, None
def get_client_type(
self,
client_type: Optional[str] = None,
region_name: Optional[str] = None,
config: Optional[Config] = None,
) -> boto3.client:
"""Get the underlying boto3 client using boto3 session"""
session, endpoint_url = self._get_credentials(region_name)
if client_type:
warnings.warn(
"client_type is deprecated. Set client_type from class attribute.",
DeprecationWarning,
stacklevel=2,
)
else:
client_type = self.client_type
# No AWS Operators use the config argument to this method.
# Keep backward compatibility with other users who might use it
if config is None:
config = self.config
return session.client(client_type, endpoint_url=endpoint_url, config=config, verify=self.verify)
def get_resource_type(
self,
resource_type: Optional[str] = None,
region_name: Optional[str] = None,
config: Optional[Config] = None,
) -> boto3.resource:
"""Get the underlying boto3 resource using boto3 session"""
session, endpoint_url = self._get_credentials(region_name)
if resource_type:
warnings.warn(
"resource_type is deprecated. Set resource_type from class attribute.",
DeprecationWarning,
stacklevel=2,
)
else:
resource_type = self.resource_type
# No AWS Operators use the config argument to this method.
# Keep backward compatibility with other users who might use it
if config is None:
config = self.config
return session.resource(resource_type, endpoint_url=endpoint_url, config=config, verify=self.verify)
@cached_property
def conn(self) -> Union[boto3.client, boto3.resource]:
"""
Get the underlying boto3 client/resource (cached)
:return: boto3.client or boto3.resource
:rtype: Union[boto3.client, boto3.resource]
"""
if self.client_type:
return self.get_client_type(self.client_type, region_name=self.region_name)
elif self.resource_type:
return self.get_resource_type(self.resource_type, region_name=self.region_name)
else:
# Rare possibility - subclasses have not specified a client_type or resource_type
raise NotImplementedError('Could not get boto3 connection!')
def get_conn(self) -> Union[boto3.client, boto3.resource]:
"""
Get the underlying boto3 client/resource (cached)
Implemented so that caching works as intended. It exists for compatibility
with subclasses that rely on a super().get_conn() method.
:return: boto3.client or boto3.resource
:rtype: Union[boto3.client, boto3.resource]
"""
# Compat shim
return self.conn
def get_session(self, region_name: Optional[str] = None) -> boto3.session.Session:
"""Get the underlying boto3.session."""
session, _ = self._get_credentials(region_name)
return session
def get_credentials(self, region_name: Optional[str] = None) -> ReadOnlyCredentials:
"""
Get the underlying `botocore.Credentials` object.
This contains the following authentication attributes: access_key, secret_key and token.
"""
session, _ = self._get_credentials(region_name)
# Credentials are refreshable, so accessing your access key and
# secret key separately can lead to a race condition.
# See https://stackoverflow.com/a/36291428/8283373
return session.get_credentials().get_frozen_credentials()
def expand_role(self, role: str) -> str:
"""
If the IAM role is a role name, get the Amazon Resource Name (ARN) for the role.
If the IAM role is already an ARN, it is returned unchanged.
:param role: IAM role name or ARN
:return: IAM role ARN
"""
if "/" in role:
return role
else:
return self.get_client_type("iam").get_role(RoleName=role)["Role"]["Arn"]
@staticmethod
def retry(should_retry: Callable[[Exception], bool]):
"""
A decorator that provides a mechanism to repeat requests in response to exceeding a temporary quota
limit.
"""
def retry_decorator(fun: Callable):
@wraps(fun)
def decorator_f(self, *args, **kwargs):
retry_args = getattr(self, 'retry_args', None)
if retry_args is None:
return fun(self, *args, **kwargs)
multiplier = retry_args.get('multiplier', 1)
min_limit = retry_args.get('min', 1)
max_limit = retry_args.get('max', 1)
stop_after_delay = retry_args.get('stop_after_delay', 10)
tenacity_logger = tenacity.before_log(self.log, logging.DEBUG) if self.log else None
default_kwargs = {
'wait': tenacity.wait_exponential(multiplier=multiplier, max=max_limit, min=min_limit),
'retry': tenacity.retry_if_exception(should_retry),
'stop': tenacity.stop_after_delay(stop_after_delay),
'before': tenacity_logger,
'after': tenacity_logger,
}
return tenacity.retry(**default_kwargs)(fun)(self, *args, **kwargs)
return decorator_f
return retry_decorator
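# Illustrative usage sketch for AwsBaseHook.retry (the hook subclass and the
# decorated method are hypothetical; the retry_args keys match the ones read
# in decorator_f above):
#
#     def _should_retry(exception: Exception) -> bool:
#         return "ThrottlingException" in str(exception)
#
#     class ExampleHook(AwsBaseHook):
#         retry_args = {"multiplier": 2, "min": 1, "max": 30, "stop_after_delay": 60}
#
#         @AwsBaseHook.retry(_should_retry)
#         def describe_something(self):
#             return self.get_conn().describe_something()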
def _parse_s3_config(
config_file_name: str, config_format: Optional[str] = "boto", profile: Optional[str] = None
) -> Tuple[Optional[str], Optional[str]]:
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
:return: tuple of (access_key, secret_key)
"""
config = configparser.ConfigParser()
if config.read(config_file_name): # pragma: no cover
sections = config.sections()
else:
raise AirflowException(f"Couldn't read {config_file_name}")
# Setting option names depending on file format
if config_format is None:
config_format = "boto"
conf_format = config_format.lower()
if conf_format == "boto": # pragma: no cover
if profile is not None and "profile " + profile in sections:
cred_section = "profile " + profile
else:
cred_section = "Credentials"
elif conf_format == "aws" and profile is not None:
cred_section = profile
else:
cred_section = "default"
# Option names
if conf_format in ("boto", "aws"): # pragma: no cover
key_id_option = "aws_access_key_id"
secret_key_option = "aws_secret_access_key"
# security_token_option = 'aws_security_token'
else:
key_id_option = "access_key"
secret_key_option = "secret_key"
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = config.get(cred_section, key_id_option)
secret_key = config.get(cred_section, secret_key_option)
except Exception:
logging.warning("Option Error in parsing s3 config file")
raise
return access_key, secret_key
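# Illustrative only: a minimal "boto"-format file that _parse_s3_config accepts
# (section and option names match the lookups above; the values are made up):
#
#     [Credentials]
#     aws_access_key_id = AKIAEXAMPLE
#     aws_secret_access_key = example-secret-key
#
# _parse_s3_config("/path/to/s3.cfg") would then return
# ("AKIAEXAMPLE", "example-secret-key").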
|
|
"""
The Message class represents a message that is sent or received and contains
methods for publishing the message, or in the case that the message was
delivered by RabbitMQ, acknowledging it, rejecting it or negatively
acknowledging it.
"""
import datetime
import json
import logging
import math
import time
import pprint
import uuid
import warnings
from pamqp import body
from pamqp import header
from pamqp import specification
from rabbitpy import base
from rabbitpy import exceptions
from rabbitpy import utils
LOGGER = logging.getLogger(__name__)
# Python 2.6 does not have a memoryview object; create a dummy for isinstance checks
try:
py26mv = memoryview(b'foo')
except NameError:
class memoryview(object):
pass
class Properties(specification.Basic.Properties):
"""Proxy class for :py:class:`pamqp.specification.Basic.Properties`"""
pass
class Message(base.AMQPClass):
"""Created by both rabbitpy internally when a message is delivered or
returned from RabbitMQ and by implementing applications, the Message class
is used to publish a message to and access and respond to a message from
RabbitMQ.
When specifying properties for a message, pass in a dict of key value items
that match the AMQP Basic.Properties specification with a small caveat.
Due to an overlap in the AMQP specification and the Python keyword
:code:`type`, the :code:`type` property is referred to as
:code:`message_type`.
The following is a list of the available properties:
* app_id
* content_type
* content_encoding
* correlation_id
* delivery_mode
* expiration
* headers
* message_id
* message_type
* priority
* reply_to
* timestamp
* user_id
**Automated features**
When passing in the body value, if it is a dict or list, it will
automatically be JSON serialized and the content type ``application/json``
will be set on the message properties.
When publishing a message to RabbitMQ, if the opinionated value is ``True``
and no ``message_id`` value was passed in as a property, a UUID will be
generated and specified as a property of the message.
Additionally, if opinionated is ``True`` and the ``timestamp`` property
is not specified when passing in ``properties``, the current Unix epoch
value will be set in the message properties.
.. note:: As of 0.21.0 ``auto_id`` is deprecated in favor of
``opinionated`` and it will be removed in a future version. As of
0.22.0 ``opinionated`` is defaulted to ``False``.
:param channel: The channel object for the message object to act upon
:type channel: :py:class:`rabbitpy.channel.Channel`
:param str or dict or list body_value: The message body
:param dict properties: A dictionary of message properties
:param bool auto_id: Add a message id if no properties were passed in.
:param bool opinionated: Automatically populate properties if True
:raises KeyError: Raised when an invalid property is passed in
"""
method = None
name = 'Message'
def __init__(self, channel, body_value, properties=None,
auto_id=False, opinionated=False):
"""Create a new instance of the Message object."""
super(Message, self).__init__(channel, 'Message')
# Always have a dict of properties set
self.properties = properties or {}
# Assign the body value
if isinstance(body_value, memoryview):
self.body = bytes(body_value)
else:
self.body = self._auto_serialize(body_value)
# Add a message id if auto_id is not turned off and it is not set
if (opinionated or auto_id) and 'message_id' not in self.properties:
if auto_id:
warnings.warn('Use opinionated instead of auto_id', DeprecationWarning)
self._add_auto_message_id()
if opinionated:
if 'timestamp' not in self.properties:
self._add_timestamp()
# Enforce datetime timestamps
if 'timestamp' in self.properties:
self.properties['timestamp'] = \
self._as_datetime(self.properties['timestamp'])
# Don't let invalid property keys in
if self._invalid_properties:
msg = 'Invalid property: %s' % self._invalid_properties[0]
raise KeyError(msg)
@property
def delivery_tag(self):
"""Return the delivery tag for a message that was delivered or gotten
from RabbitMQ.
:rtype: int or None
"""
return self.method.delivery_tag if self.method else None
@property
def redelivered(self):
"""Indicates if this message may have been delivered before (but not
acknowledged)"
:rtype: bool or None
"""
return self.method.redelivered if self.method else None
@property
def routing_key(self):
"""Return the routing_key for a message that was delivered or gotten
from RabbitMQ.
:rtype: str or None
"""
return self.method.routing_key if self.method else None
@property
def exchange(self):
"""Return the source exchange for a message that was delivered or
gotten from RabbitMQ.
:rtype: string or None
"""
return self.method.exchange if self.method else None
def ack(self, all_previous=False):
"""Acknowledge receipt of the message to RabbitMQ. Will raise an
ActionException if the message was not received from a broker.
:raises: ActionException
"""
if not self.method:
raise exceptions.ActionException('Can not ack non-received '
'message')
basic_ack = specification.Basic.Ack(self.method.delivery_tag,
multiple=all_previous)
self.channel.write_frame(basic_ack)
def json(self):
"""Deserialize the message body if it is JSON, returning the value.
:rtype: any
"""
try:
return json.loads(self.body)
except TypeError: # pragma: no cover
return json.loads(self.body.decode('utf-8'))
def nack(self, requeue=False, all_previous=False):
"""Negatively acknowledge receipt of the message to RabbitMQ. Will
raise an ActionException if the message was not received from a broker.
:param bool requeue: Requeue the message
:param bool all_previous: Nack all previous unacked messages up to and
including this one
:raises: ActionException
"""
if not self.method:
raise exceptions.ActionException('Can not nack non-received '
'message')
basic_nack = specification.Basic.Nack(self.method.delivery_tag,
requeue=requeue,
multiple=all_previous)
self.channel.write_frame(basic_nack)
def pprint(self, properties=False): # pragma: no cover
"""Print a formatted representation of the message.
:param bool properties: Include properties in the representation
"""
print('Exchange: %s\n' % self.method.exchange)
print('Routing Key: %s\n' % self.method.routing_key)
if properties:
print('Properties:\n')
pprint.pprint(self.properties)
print('\nBody:\n')
pprint.pprint(self.body)
def publish(self, exchange, routing_key='', mandatory=False,
immediate=False):
"""Publish the message to the exchange with the specified routing
key.
In Python 2 if the message is a ``unicode`` value it will be converted
to a ``str`` using ``str.encode('UTF-8')``. If you do not want the
auto-conversion to take place, set the body to a ``str`` or ``bytes``
value prior to publishing.
In Python 3 if the message is a ``str`` value it will be converted to
a ``bytes`` value using ``bytes(value.encode('UTF-8'))``. If you do
not want the auto-conversion to take place, set the body to a
``bytes`` value prior to publishing.
:param exchange: The exchange to publish the message to
:type exchange: str or :class:`rabbitpy.Exchange`
:param str routing_key: The routing key to use
:param bool mandatory: Requires the message is published
:param bool immediate: Request immediate delivery
:return: bool or None
:raises: rabbitpy.exceptions.MessageReturnedException
"""
if isinstance(exchange, base.AMQPClass):
exchange = exchange.name
# Coerce the body to the proper type
payload = utils.maybe_utf8_encode(self.body)
frames = [specification.Basic.Publish(exchange=exchange,
routing_key=routing_key or '',
mandatory=mandatory,
immediate=immediate),
header.ContentHeader(body_size=len(payload),
properties=self._properties)]
# Calculate how many body frames are needed
pieces = int(math.ceil(len(payload) /
float(self.channel.maximum_frame_size)))
# Send the message
for offset in range(0, pieces):
start = self.channel.maximum_frame_size * offset
end = start + self.channel.maximum_frame_size
if end > len(payload):
end = len(payload)
frames.append(body.ContentBody(payload[start:end]))
# Write the frames out
self.channel.write_frames(frames)
# If publisher confirmations are enabled, wait for the response
if self.channel.publisher_confirms:
response = self.channel.wait_for_confirmation()
if isinstance(response, specification.Basic.Ack):
return True
elif isinstance(response, specification.Basic.Nack):
return False
else:
raise exceptions.UnexpectedResponseError(response)
def reject(self, requeue=False):
"""Reject receipt of the message to RabbitMQ. Will raise
an ActionException if the message was not received from a broker.
:param bool requeue: Requeue the message
:raises: ActionException
"""
if not self.method:
raise exceptions.ActionException('Can not reject non-received '
'message')
basic_reject = specification.Basic.Reject(self.method.delivery_tag,
requeue=requeue)
self.channel.write_frame(basic_reject)
def _add_auto_message_id(self):
"""Set the message_id property to a new UUID."""
self.properties['message_id'] = str(uuid.uuid4())
def _add_timestamp(self):
"""Add the timestamp to the properties"""
self.properties['timestamp'] = datetime.datetime.now()
def _as_datetime(self, value):
"""Return the passed in value as a ``datetime.datetime`` value.
:param value: The value to convert or pass through
:type value: datetime.datetime
:type value: time.struct_time
:type value: int
:type value: float
:type value: str
:type value: bytes
:type value: unicode
:rtype: datetime.datetime
:raises: TypeError
"""
if value is None:
return None
if isinstance(value, datetime.datetime):
return value
if isinstance(value, time.struct_time):
return datetime.datetime(*value[:6])
if utils.is_string(value):
value = int(value)
if isinstance(value, float) or isinstance(value, int):
return datetime.datetime.fromtimestamp(value)
raise TypeError('Could not cast a %s value to a datetime.datetime' %
type(value))
def _auto_serialize(self, body_value):
"""Automatically serialize the body as JSON if it is a dict or list.
:param mixed body_value: The message body passed into the constructor
:return: str or bytes or unicode or None
"""
if isinstance(body_value, dict) or isinstance(body_value, list):
self.properties['content_type'] = 'application/json'
return json.dumps(body_value, ensure_ascii=False)
return body_value
def _coerce_properties(self):
"""Force properties to be set to the correct data type"""
for key, value in self.properties.items():
_type = specification.Basic.Properties.type(key)
if self.properties[key] is None:
continue
if _type == 'shortstr':
if not utils.is_string(value):
LOGGER.warning('Coercing property %s to bytes', key)
value = str(value)
self.properties[key] = utils.maybe_utf8_encode(value)
elif _type == 'octet' and not isinstance(value, int):
LOGGER.warning('Coercing property %s to int', key)
try:
self.properties[key] = int(value)
except TypeError as error:
LOGGER.warning('Could not coerce %s: %s', key, error)
elif _type == 'table' and not isinstance(value, dict):
LOGGER.warning('Resetting invalid value for %s to None', key)
self.properties[key] = {}
if key == 'timestamp':
self.properties[key] = self._as_datetime(value)
@property
def _invalid_properties(self):
"""Return a list of invalid properties that currently exist in the the
properties that are set.
:rtype: list
"""
return [key for key in self.properties
if key not in specification.Basic.Properties.attributes()]
@property
def _properties(self):
"""Return a new Basic.Properties object representing the message
properties.
:rtype: pamqp.specification.Basic.Properties
"""
self._prune_invalid_properties()
self._coerce_properties()
return specification.Basic.Properties(**self.properties)
def _prune_invalid_properties(self):
"""Remove invalid properties from the message properties."""
for key in self._invalid_properties:
LOGGER.warning('Removing invalid property "%s"', key)
del self.properties[key]
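# Illustrative usage sketch (assumes an open rabbitpy channel in ``channel``
# and an exchange named "events"; both names are hypothetical):
#
#     message = Message(channel, {'event': 'user.created', 'id': 42},
#                       properties={'app_id': 'example'}, opinionated=True)
#     message.publish('events', routing_key='user.created')
#
# The dict body is JSON-serialized and content_type set to application/json
# automatically; because opinionated is True, message_id and timestamp are
# filled in as well (see Message.__init__).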
|
|
"""Tests for certbot.client."""
import os
import shutil
import tempfile
import unittest
import OpenSSL
import mock
from acme import jose
from certbot import account
from certbot import errors
from certbot import util
from certbot.tests import test_util
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san.der")
class ConfigHelper(object):
"""Creates a dummy object to imitate a namespace object
Example: cfg = ConfigHelper(redirect=True, hsts=False, uir=False)
will result in: cfg.redirect=True, cfg.hsts=False, etc.
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
class RegisterTest(unittest.TestCase):
"""Tests for certbot.client.register."""
def setUp(self):
self.config = mock.MagicMock(rsa_key_size=1024, register_unsafely_without_email=False)
self.account_storage = account.AccountMemoryStorage()
self.tos_cb = mock.MagicMock()
def _call(self):
from certbot.client import register
return register(self.config, self.account_storage, self.tos_cb)
def test_no_tos(self):
with mock.patch("certbot.client.acme_client.Client") as mock_client:
mock_client.register().terms_of_service = "http://tos"
with mock.patch("certbot.account.report_new_account"):
self.tos_cb.return_value = False
self.assertRaises(errors.Error, self._call)
self.tos_cb.return_value = True
self._call()
self.tos_cb = None
self._call()
def test_it(self):
with mock.patch("certbot.client.acme_client.Client"):
with mock.patch("certbot.account.report_new_account"):
self._call()
@mock.patch("certbot.account.report_new_account")
@mock.patch("certbot.client.display_ops.get_email")
def test_email_retry(self, _rep, mock_get_email):
from acme import messages
self.config.noninteractive_mode = False
msg = "DNS problem: NXDOMAIN looking up MX for example.com"
mx_err = messages.Error.with_code('invalidContact', detail=msg)
with mock.patch("certbot.client.acme_client.Client") as mock_client:
mock_client().register.side_effect = [mx_err, mock.MagicMock()]
self._call()
self.assertEqual(mock_get_email.call_count, 1)
@mock.patch("certbot.account.report_new_account")
def test_email_invalid_noninteractive(self, _rep):
from acme import messages
msg = "DNS problem: NXDOMAIN looking up MX for example.com"
mx_err = messages.Error.with_code('invalidContact', detail=msg)
with mock.patch("certbot.client.acme_client.Client") as mock_client:
mock_client().register.side_effect = [mx_err, mock.MagicMock()]
self.assertRaises(errors.Error, self._call)
def test_needs_email(self):
self.config.email = None
self.assertRaises(errors.Error, self._call)
@mock.patch("certbot.client.logger")
def test_without_email(self, mock_logger):
with mock.patch("certbot.client.acme_client.Client"):
with mock.patch("certbot.account.report_new_account"):
self.config.email = None
self.config.register_unsafely_without_email = True
self.config.dry_run = False
self._call()
mock_logger.warning.assert_called_once_with(mock.ANY)
def test_unsupported_error(self):
from acme import messages
msg = "Test"
mx_err = messages.Error(detail=msg, typ="malformed", title="title")
with mock.patch("certbot.client.acme_client.Client") as mock_client:
mock_client().register.side_effect = [mx_err, mock.MagicMock()]
self.assertRaises(messages.Error, self._call)
class ClientTest(unittest.TestCase):
"""Tests for certbot.client.Client."""
def setUp(self):
self.config = mock.MagicMock(
no_verify_ssl=False, config_dir="/etc/letsencrypt", allow_subset_of_names=False)
# pylint: disable=star-args
self.account = mock.MagicMock(**{"key.pem": KEY})
self.eg_domains = ["example.com", "www.example.com"]
from certbot.client import Client
with mock.patch("certbot.client.acme_client.Client") as acme:
self.acme_client = acme
self.acme = acme.return_value = mock.MagicMock()
self.client = Client(
config=self.config, account_=self.account,
auth=None, installer=None)
def test_init_acme_verify_ssl(self):
net = self.acme_client.call_args[1]["net"]
self.assertTrue(net.verify_ssl)
def _mock_obtain_certificate(self):
self.client.auth_handler = mock.MagicMock()
self.client.auth_handler.get_authorizations.return_value = [None]
self.acme.request_issuance.return_value = mock.sentinel.certr
self.acme.fetch_chain.return_value = mock.sentinel.chain
def _check_obtain_certificate(self):
self.client.auth_handler.get_authorizations.assert_called_once_with(
self.eg_domains,
self.config.allow_subset_of_names)
authzr = self.client.auth_handler.get_authorizations()
self.acme.request_issuance.assert_called_once_with(
jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, CSR_SAN)),
authzr)
self.acme.fetch_chain.assert_called_once_with(mock.sentinel.certr)
@mock.patch("certbot.client.logger")
def test_obtain_certificate_from_csr(self, mock_logger):
self._mock_obtain_certificate()
test_csr = util.CSR(form="der", file=None, data=CSR_SAN)
auth_handler = self.client.auth_handler
authzr = auth_handler.get_authorizations(self.eg_domains, False)
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(
self.eg_domains,
test_csr,
authzr=authzr))
# and that the cert was obtained correctly
self._check_obtain_certificate()
# Test for authzr=None
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(
self.eg_domains,
test_csr,
authzr=None))
auth_handler.get_authorizations.assert_called_with(self.eg_domains)
# Test for no auth_handler
self.client.auth_handler = None
self.assertRaises(
errors.Error,
self.client.obtain_certificate_from_csr,
self.eg_domains,
test_csr)
mock_logger.warning.assert_called_once_with(mock.ANY)
@mock.patch("certbot.client.crypto_util")
def test_obtain_certificate(self, mock_crypto_util):
self._mock_obtain_certificate()
csr = util.CSR(form="der", file=None, data=CSR_SAN)
mock_crypto_util.init_save_csr.return_value = csr
mock_crypto_util.init_save_key.return_value = mock.sentinel.key
domains = ["example.com", "www.example.com"]
# return_value is essentially set to (None, None) in
# _mock_obtain_certificate(), which breaks this test.
# Thus fixed by the next line.
authzr = []
# domain ordering should not be affected by authorization order
for domain in reversed(domains):
authzr.append(
mock.MagicMock(
body=mock.MagicMock(
identifier=mock.MagicMock(
value=domain))))
self.client.auth_handler.get_authorizations.return_value = authzr
self.assertEqual(
self.client.obtain_certificate(domains),
(mock.sentinel.certr, mock.sentinel.chain, mock.sentinel.key, csr))
mock_crypto_util.init_save_key.assert_called_once_with(
self.config.rsa_key_size, self.config.key_dir)
mock_crypto_util.init_save_csr.assert_called_once_with(
mock.sentinel.key, domains, self.config.csr_dir)
self._check_obtain_certificate()
@mock.patch("certbot.cli.helpful_parser")
def test_save_certificate(self, mock_parser):
# pylint: disable=too-many-locals
certs = ["matching_cert.pem", "cert.pem", "cert-san.pem"]
tmp_path = tempfile.mkdtemp()
os.chmod(tmp_path, 0o755) # TODO: really??
certr = mock.MagicMock(body=test_util.load_comparable_cert(certs[0]))
chain_cert = [test_util.load_comparable_cert(certs[1]),
test_util.load_comparable_cert(certs[2])]
candidate_cert_path = os.path.join(tmp_path, "certs", "cert.pem")
candidate_chain_path = os.path.join(tmp_path, "chains", "chain.pem")
candidate_fullchain_path = os.path.join(tmp_path, "chains", "fullchain.pem")
mock_parser.verb = "certonly"
mock_parser.args = ["--cert-path", candidate_cert_path,
"--chain-path", candidate_chain_path,
"--fullchain-path", candidate_fullchain_path]
cert_path, chain_path, fullchain_path = self.client.save_certificate(
certr, chain_cert, candidate_cert_path, candidate_chain_path,
candidate_fullchain_path)
self.assertEqual(os.path.dirname(cert_path),
os.path.dirname(candidate_cert_path))
self.assertEqual(os.path.dirname(chain_path),
os.path.dirname(candidate_chain_path))
self.assertEqual(os.path.dirname(fullchain_path),
os.path.dirname(candidate_fullchain_path))
with open(cert_path, "rb") as cert_file:
cert_contents = cert_file.read()
self.assertEqual(cert_contents, test_util.load_vector(certs[0]))
with open(chain_path, "rb") as chain_file:
chain_contents = chain_file.read()
self.assertEqual(chain_contents, test_util.load_vector(certs[1]) +
test_util.load_vector(certs[2]))
shutil.rmtree(tmp_path)
def test_deploy_certificate_success(self):
self.assertRaises(errors.Error, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer = mock.MagicMock()
self.client.installer = installer
self.client.deploy_certificate(
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.deploy_cert.assert_called_once_with(
cert_path=os.path.abspath("cert"),
chain_path=os.path.abspath("chain"),
domain='foo.bar',
fullchain_path='fullchain',
key_path=os.path.abspath("key"))
self.assertEqual(installer.save.call_count, 2)
installer.restart.assert_called_once_with()
def test_deploy_certificate_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.deploy_cert.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
def test_deploy_certificate_save_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.save.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
@mock.patch("certbot.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = [errors.PluginError, None]
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("certbot.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure2(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
@mock.patch("certbot.client.enhancements")
def test_enhance_config(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error, self.client.enhance_config,
["foo.bar"], config, None)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
self.client.enhance_config(["foo.bar"], config, None)
installer.enhance.assert_called_once_with("foo.bar", "redirect", None)
self.assertEqual(installer.save.call_count, 1)
installer.restart.assert_called_once_with()
@mock.patch("certbot.client.enhancements")
def test_enhance_config_no_ask(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False,
uir=False, staple=False)
self.assertRaises(errors.Error, self.client.enhance_config,
["foo.bar"], config, None)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = [
"redirect", "ensure-http-header", "staple-ocsp"]
config = ConfigHelper(redirect=True, hsts=False,
uir=False, staple=False)
self.client.enhance_config(["foo.bar"], config, None)
installer.enhance.assert_called_with("foo.bar", "redirect", None)
config = ConfigHelper(redirect=False, hsts=True,
uir=False, staple=False)
self.client.enhance_config(["foo.bar"], config, None)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Strict-Transport-Security")
config = ConfigHelper(redirect=False, hsts=False,
uir=True, staple=False)
self.client.enhance_config(["foo.bar"], config, None)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Upgrade-Insecure-Requests")
config = ConfigHelper(redirect=False, hsts=False,
uir=False, staple=True)
self.client.enhance_config(["foo.bar"], config, None)
installer.enhance.assert_called_with("foo.bar", "staple-ocsp", None)
self.assertEqual(installer.save.call_count, 4)
self.assertEqual(installer.restart.call_count, 4)
@mock.patch("certbot.client.enhancements")
def test_enhance_config_unsupported(self, mock_enhancements):
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = []
config = ConfigHelper(redirect=None, hsts=True, uir=True)
self.client.enhance_config(["foo.bar"], config, None)
installer.enhance.assert_not_called()
mock_enhancements.ask.assert_not_called()
def test_enhance_config_no_installer(self):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error, self.client.enhance_config,
["foo.bar"], config, None)
@mock.patch("certbot.client.zope.component.getUtility")
@mock.patch("certbot.client.enhancements")
def test_enhance_config_enhance_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.enhance.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError, self.client.enhance_config,
["foo.bar"], config, None)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("certbot.client.zope.component.getUtility")
@mock.patch("certbot.client.enhancements")
def test_enhance_config_save_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.save.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError, self.client.enhance_config,
["foo.bar"], config, None)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("certbot.client.zope.component.getUtility")
@mock.patch("certbot.client.enhancements")
def test_enhance_config_restart_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = [errors.PluginError, None]
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError, self.client.enhance_config,
["foo.bar"], config, None)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("certbot.client.zope.component.getUtility")
@mock.patch("certbot.client.enhancements")
def test_enhance_config_restart_failure2(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError, self.client.enhance_config,
["foo.bar"], config, None)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
class RollbackTest(unittest.TestCase):
"""Tests for certbot.client.rollback."""
def setUp(self):
self.m_install = mock.MagicMock()
@classmethod
def _call(cls, checkpoints, side_effect):
from certbot.client import rollback
with mock.patch("certbot.client.plugin_selection.pick_installer") as mpi:
mpi.side_effect = side_effect
rollback(None, checkpoints, {}, mock.MagicMock())
def test_no_problems(self):
self._call(1, self.m_install)
self.assertEqual(self.m_install().rollback_checkpoints.call_count, 1)
self.assertEqual(self.m_install().restart.call_count, 1)
def test_no_installer(self):
self._call(1, None) # Just make sure no exceptions are raised
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
|
"""
Users
=====
"""
from pipes import quote
import posixpath
import random
import string
from fabric.api import hide, run, settings, sudo, local
import six
from fabtools.group import (
exists as _group_exists,
create as _group_create,
)
from fabtools.files import uncommented_lines
from fabtools.utils import run_as_root
def exists(name):
"""
Check if a user exists.
"""
with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
return run('getent passwd %(name)s' % locals()).succeeded
_SALT_CHARS = string.ascii_letters + string.digits + './'
def _crypt_password(password):
from crypt import crypt
random.seed()
salt = ''
for _ in range(2):
salt += random.choice(_SALT_CHARS)
crypted_password = crypt(password, salt)
return crypted_password
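# Illustrative only: _crypt_password('s3cr3t') returns a classic crypt(3) hash
# with a random two-character salt (the exact value varies per call), which is
# the format useradd/usermod expect for their -p option below.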
def create(name, comment=None, home=None, create_home=None, skeleton_dir=None,
group=None, create_group=True, extra_groups=None, password=None,
system=False, shell=None, uid=None, ssh_public_keys=None,
non_unique=False):
"""
Create a new user and its home directory.
If *create_home* is ``None`` (the default), a home directory will be
created for normal users, but not for system users.
You can override the default behaviour by setting *create_home* to
``True`` or ``False``.
If *system* is ``True``, the user will be a system account. Its UID
will be chosen in a specific range, and it will not have a home
directory, unless you explicitly set *create_home* to ``True``.
If *shell* is ``None``, the user's login shell will be the system's
default login shell (usually ``/bin/sh``).
*ssh_public_keys* can be a (local) filename or a list of (local)
filenames of public keys that should be added to the user's SSH
authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).
Example::
import fabtools
if not fabtools.user.exists('alice'):
fabtools.user.create('alice')
with cd('/home/alice'):
# ...
"""
# Note that we use useradd (and not adduser), as it is the most
# portable command to create users across various distributions:
# http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/useradd.html
args = []
if comment:
args.append('-c %s' % quote(comment))
if home:
args.append('-d %s' % quote(home))
if group:
args.append('-g %s' % quote(group))
if create_group:
if not _group_exists(group):
_group_create(group)
if extra_groups:
groups = ','.join(quote(group) for group in extra_groups)
args.append('-G %s' % groups)
if create_home is None:
create_home = not system
if create_home is True:
args.append('-m')
elif create_home is False:
args.append('-M')
if skeleton_dir:
args.append('-k %s' % quote(skeleton_dir))
if password:
crypted_password = _crypt_password(password)
args.append('-p %s' % quote(crypted_password))
if system:
args.append('-r')
if shell:
args.append('-s %s' % quote(shell))
if uid:
args.append('-u %s' % uid)
if non_unique:
args.append('-o')
args.append(name)
args = ' '.join(args)
run_as_root('useradd %s' % args)
if ssh_public_keys:
if isinstance(ssh_public_keys, six.string_types):
ssh_public_keys = [ssh_public_keys]
add_ssh_public_keys(name, ssh_public_keys)
def modify(name, comment=None, home=None, move_current_home=False, group=None,
extra_groups=None, login_name=None, password=None, shell=None,
uid=None, ssh_public_keys=None, non_unique=False):
"""
Modify an existing user.
*ssh_public_keys* can be a (local) filename or a list of (local)
filenames of public keys that should be added to the user's SSH
authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).
Example::
import fabtools
if fabtools.user.exists('alice'):
fabtools.user.modify('alice', shell='/bin/sh')
"""
args = []
if comment:
args.append('-c %s' % quote(comment))
if home:
args.append('-d %s' % quote(home))
if move_current_home:
args.append('-m')
if group:
args.append('-g %s' % quote(group))
if extra_groups:
groups = ','.join(quote(group) for group in extra_groups)
args.append('-G %s' % groups)
if login_name:
args.append('-l %s' % quote(login_name))
if password:
crypted_password = _crypt_password(password)
args.append('-p %s' % quote(crypted_password))
if shell:
args.append('-s %s' % quote(shell))
if uid:
args.append('-u %s' % quote(str(uid)))  # quote() needs a str; uid may be an int
if non_unique:
args.append('-o')
if args:
args.append(name)
args = ' '.join(args)
run_as_root('usermod %s' % args)
if ssh_public_keys:
if isinstance(ssh_public_keys, six.string_types):
ssh_public_keys = [ssh_public_keys]
add_ssh_public_keys(name, ssh_public_keys)
def home_directory(name):
"""
Get the absolute path to the user's home directory
Example::
import fabtools
home = fabtools.user.home_directory('alice')
"""
with settings(hide('running', 'stdout')):
return run('echo ~' + name)
def local_home_directory(name=''):
"""
Get the absolute path to the local user's home directory
Example::
import fabtools
local_home = fabtools.user.local_home_directory()
"""
with settings(hide('running', 'stdout')):
return local('echo ~' + name, capture=True)
def authorized_keys(name):
"""
Get the list of authorized SSH public keys for the user
"""
ssh_dir = posixpath.join(home_directory(name), '.ssh')
authorized_keys_filename = posixpath.join(ssh_dir, 'authorized_keys')
return uncommented_lines(authorized_keys_filename, use_sudo=True)
def add_ssh_public_key(name, filename):
"""
Add a public key to the user's authorized SSH keys.
*filename* must be the local filename of a public key that should be
added to the user's SSH authorized keys.
Example::
import fabtools
fabtools.user.add_ssh_public_key('alice', '~/.ssh/id_rsa.pub')
"""
add_ssh_public_keys(name, [filename])
def add_ssh_public_keys(name, filenames):
"""
Add multiple public keys to the user's authorized SSH keys.
*filenames* must be a list of local filenames of public keys that
should be added to the user's SSH authorized keys.
Example::
import fabtools
fabtools.user.add_ssh_public_keys('alice', [
'~/.ssh/id1_rsa.pub',
'~/.ssh/id2_rsa.pub',
])
"""
from fabtools.require.files import (
directory as _require_directory,
file as _require_file,
)
ssh_dir = posixpath.join(home_directory(name), '.ssh')
_require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
authorized_keys_filename = posixpath.join(ssh_dir, 'authorized_keys')
_require_file(authorized_keys_filename, mode='600', owner=name,
use_sudo=True)
for filename in filenames:
with open(filename) as public_key_file:
public_keys = public_key_file.read().strip().split("\n")
# we don't use fabric.contrib.files.append() as it's buggy
for public_key in public_keys:
if public_key not in authorized_keys(name):
sudo('echo %s >>%s' % (quote(public_key),
quote(authorized_keys_filename)))
def add_host_keys(name, hostname):
"""
Add all public keys of a host to the user's SSH known hosts file
"""
from fabtools.require.files import (
directory as _require_directory,
file as _require_file,
)
ssh_dir = posixpath.join(home_directory(name), '.ssh')
_require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
known_hosts_filename = posixpath.join(ssh_dir, 'known_hosts')
_require_file(known_hosts_filename, mode='644', owner=name, use_sudo=True)
known_hosts = uncommented_lines(known_hosts_filename, use_sudo=True)
with hide('running', 'stdout'):
res = run('ssh-keyscan -t rsa,dsa %s 2>/dev/null' % hostname)
for host_key in res.splitlines():
if host_key not in known_hosts:
sudo('echo %s >>%s' % (quote(host_key),
quote(known_hosts_filename)))
|
|
import collections.abc
import difflib
import itertools
import re
import textwrap
import traceback
import typing
from mitmproxy.proxy import commands, context, layer
from mitmproxy.proxy import events
from mitmproxy.connection import ConnectionState
from mitmproxy.proxy.events import command_reply_subclasses
from mitmproxy.proxy.layer import Layer
PlaybookEntry = typing.Union[commands.Command, events.Event]
PlaybookEntryList = typing.List[PlaybookEntry]
def _eq(
a: PlaybookEntry,
b: PlaybookEntry
) -> bool:
"""Compare two commands/events, and possibly update placeholders."""
if type(a) != type(b):
return False
a_dict = a.__dict__
b_dict = b.__dict__
# we can assume a.keys() == b.keys()
for k in a_dict:
if k == "blocking":
continue
x = a_dict[k]
y = b_dict[k]
# if there's a placeholder, make it x.
if isinstance(y, _Placeholder):
x, y = y, x
if isinstance(x, _Placeholder):
try:
x = x.setdefault(y)
except TypeError as e:
raise TypeError(f"Placeholder type error for {type(a).__name__}.{k}: {e}")
if x != y:
return False
return True
def eq(
a: typing.Union[PlaybookEntry, typing.Iterable[PlaybookEntry]],
b: typing.Union[PlaybookEntry, typing.Iterable[PlaybookEntry]]
):
"""
Compare an individual event/command or a list of events/commands.
"""
if isinstance(a, collections.abc.Iterable) and isinstance(b, collections.abc.Iterable):
return all(
_eq(x, y) for x, y in itertools.zip_longest(a, b)
)
return _eq(a, b)
def _fmt_entry(x: PlaybookEntry):
arrow = ">>" if isinstance(x, events.Event) else "<<"
x = str(x)
x = re.sub('Placeholder:None', '<unset placeholder>', x, flags=re.IGNORECASE)
x = re.sub('Placeholder:', '', x, flags=re.IGNORECASE)
x = textwrap.indent(x, " ")[5:]
return f"{arrow} {x}"
def _merge_sends(lst: typing.List[commands.Command], ignore_hooks: bool, ignore_logs: bool) -> PlaybookEntryList:
current_send = None
for x in lst:
if isinstance(x, commands.SendData):
if current_send is None or current_send.connection != x.connection:
current_send = x
yield x
else:
current_send.data += x.data
else:
ignore = (
(ignore_hooks and isinstance(x, commands.StartHook))
or
(ignore_logs and isinstance(x, commands.Log))
)
if not ignore:
current_send = None
yield x
class _TracebackInPlaybook(commands.Command):
def __init__(self, exc):
self.e = exc
def __repr__(self):
return self.e
class Playbook:
"""
Assert that a layer emits the expected commands in reaction to a given sequence of events.
For example, the following code asserts that the TCP layer emits an OpenConnection command
immediately after starting and does not yield any further commands as a reaction to successful
connection establishment.
assert playbook(tcp.TCPLayer(tctx)) \
<< commands.OpenConnection(tctx.server)
>> reply(None)
<< None # this line is optional.
This is syntactic sugar for the following:
t = tcp.TCPLayer(tctx)
x1 = list(t.handle_event(events.Start()))
assert x1 == [commands.OpenConnection(tctx.server)]
x2 = list(t.handle_event(events.OpenConnectionReply(x1[-1])))
assert x2 == []
"""
layer: Layer
"""The base layer"""
expected: PlaybookEntryList
"""expected command/event sequence"""
actual: PlaybookEntryList
"""actual command/event sequence"""
_errored: bool
"""used to check if playbook as been fully asserted"""
logs: bool
"""If False, the playbook specification doesn't contain log commands."""
hooks: bool
"""If False, the playbook specification doesn't include hooks or hook replies. They are automatically replied to."""
def __init__(
self,
layer: Layer,
hooks: bool = True,
logs: bool = False,
expected: typing.Optional[PlaybookEntryList] = None,
):
if expected is None:
expected = [
events.Start()
]
self.layer = layer
self.expected = expected
self.actual = []
self._errored = False
self.logs = logs
self.hooks = hooks
def __rshift__(self, e):
"""Add an event to send"""
assert isinstance(e, events.Event)
self.expected.append(e)
return self
def __lshift__(self, c):
"""Add an expected command"""
if c is None:
return self
assert isinstance(c, commands.Command)
prev = self.expected[-1]
two_subsequent_sends_to_the_same_remote = (
isinstance(c, commands.SendData)
and isinstance(prev, commands.SendData)
and prev.connection is c.connection
)
if two_subsequent_sends_to_the_same_remote:
prev.data += c.data
else:
self.expected.append(c)
return self
def __bool__(self):
"""Determine if playbook is correct."""
already_asserted = len(self.actual)
i = already_asserted
while i < len(self.expected):
x = self.expected[i]
if isinstance(x, commands.Command):
pass
else:
if hasattr(x, "playbook_eval"):
try:
x = self.expected[i] = x.playbook_eval(self)
except Exception:
self.actual.append(_TracebackInPlaybook(traceback.format_exc()))
break
for name, value in vars(x).items():
if isinstance(value, _Placeholder):
setattr(x, name, value())
if isinstance(x, events.OpenConnectionCompleted) and not x.reply:
x.command.connection.state = ConnectionState.OPEN
x.command.connection.timestamp_start = 1624544785
elif isinstance(x, events.ConnectionClosed):
x.connection.state &= ~ConnectionState.CAN_READ
x.connection.timestamp_end = 1624544787
self.actual.append(x)
try:
cmds: typing.List[commands.Command] = list(self.layer.handle_event(x))
except Exception:
self.actual.append(_TracebackInPlaybook(traceback.format_exc()))
break
cmds = list(_merge_sends(cmds, ignore_hooks=not self.hooks, ignore_logs=not self.logs))
self.actual.extend(cmds)
pos = len(self.actual) - len(cmds) - 1
hook_replies = []
for cmd in cmds:
pos += 1
assert self.actual[pos] == cmd
if isinstance(cmd, commands.CloseConnection):
if cmd.half_close:
cmd.connection.state &= ~ConnectionState.CAN_WRITE
else:
cmd.connection.state = ConnectionState.CLOSED
elif isinstance(cmd, commands.Log):
need_to_emulate_log = (
not self.logs and
cmd.level in ("debug", "info") and
(
pos >= len(self.expected)
or not isinstance(self.expected[pos], commands.Log)
)
)
if need_to_emulate_log:
self.expected.insert(pos, cmd)
elif isinstance(cmd, commands.StartHook) and not self.hooks:
need_to_emulate_hook = (
not self.hooks
and (
pos >= len(self.expected) or
(not (
isinstance(self.expected[pos], commands.StartHook)
and self.expected[pos].name == cmd.name
))
)
)
if need_to_emulate_hook:
self.expected.insert(pos, cmd)
if cmd.blocking:
# the current event may still have yielded more events, so we need to insert
# the reply *after* those additional events.
hook_replies.append(events.HookCompleted(cmd))
self.expected = self.expected[:pos + 1] + hook_replies + self.expected[pos + 1:]
eq(self.expected[i:], self.actual[i:]) # compare now already to set placeholders
i += 1
if not eq(self.expected, self.actual):
self._errored = True
diffs = list(difflib.ndiff(
[_fmt_entry(x) for x in self.expected],
[_fmt_entry(x) for x in self.actual]
))
if already_asserted:
diffs.insert(already_asserted, "==== asserted until here ====")
diff = "\n".join(diffs)
raise AssertionError(f"Playbook mismatch!\n{diff}")
else:
return True
def __del__(self):
# Playbooks are only executed on assert (which signals that the playbook is partially
# complete), so we need to signal if someone forgets to assert and playbooks aren't
# evaluated.
is_final_destruct = not hasattr(self, "_errored")
if is_final_destruct or (not self._errored and len(self.actual) < len(self.expected)):
raise RuntimeError("Unfinished playbook!")
class reply(events.Event):
args: typing.Tuple[typing.Any, ...]
to: typing.Union[commands.Command, int]
side_effect: typing.Callable[[typing.Any], typing.Any]
def __init__(
self,
*args,
to: typing.Union[commands.Command, int] = -1,
side_effect: typing.Callable[[typing.Any], None] = lambda x: None
):
"""Utility method to reply to the latest hook in playbooks."""
assert not args or not isinstance(args[0], commands.Command)
self.args = args
self.to = to
self.side_effect = side_effect
def playbook_eval(self, playbook: Playbook) -> events.CommandCompleted:
if isinstance(self.to, int):
expected = playbook.expected[:playbook.expected.index(self)]
assert abs(self.to) < len(expected)
to = expected[self.to]
if not isinstance(to, commands.Command):
raise AssertionError(f"There is no command at offset {self.to}: {to}")
else:
self.to = to
for cmd in reversed(playbook.actual):
if eq(self.to, cmd):
self.to = cmd
break
else:
raise AssertionError(f"Expected command {self.to} did not occur.")
assert isinstance(self.to, commands.Command)
if isinstance(self.to, commands.StartHook):
self.side_effect(*self.to.args())
reply_cls = events.HookCompleted
else:
self.side_effect(self.to)
reply_cls = command_reply_subclasses[type(self.to)]
try:
inst = reply_cls(self.to, *self.args)
except TypeError as e:
raise ValueError(f"Cannot instantiate {reply_cls.__name__}: {e}")
return inst
T = typing.TypeVar("T")
class _Placeholder(typing.Generic[T]):
"""
Placeholder value in playbooks, so that objects (flows in particular) can be referenced before
they are known. Example:
f = Placeholder(TCPFlow)
assert (
playbook(tcp.TCPLayer(tctx))
<< TcpStartHook(f) # the flow object returned here is generated by the layer.
)
# We can obtain the flow object now using f():
assert f().messages == 0
"""
def __init__(self, cls: typing.Type[T]):
self._obj = None
self._cls = cls
def __call__(self) -> T:
"""Get the actual object"""
return self._obj
def setdefault(self, value: T) -> T:
if self._obj is None:
if self._cls is not typing.Any and not isinstance(value, self._cls):
raise TypeError(f"expected {self._cls.__name__}, got {type(value).__name__}.")
self._obj = value
return self._obj
def __repr__(self):
return f"Placeholder:{repr(self._obj)}"
def __str__(self):
return f"Placeholder:{str(self._obj)}"
# noinspection PyPep8Naming
def Placeholder(cls: typing.Type[T] = typing.Any) -> typing.Union[T, _Placeholder[T]]:
return _Placeholder(cls)
class EchoLayer(Layer):
"""Echo layer that sends all data back to the client in lowercase."""
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
if isinstance(event, events.DataReceived):
yield commands.SendData(event.connection, event.data.lower())
if isinstance(event, events.ConnectionClosed):
yield commands.CloseConnection(event.connection)
class RecordLayer(Layer):
"""Layer that records all events but does nothing."""
event_log: typing.List[events.Event]
def __init__(self, context: context.Context) -> None:
super().__init__(context)
self.event_log = []
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
self.event_log.append(event)
yield from ()
def reply_next_layer(
child_layer: typing.Union[typing.Type[Layer], typing.Callable[[context.Context], Layer]],
*args,
**kwargs
) -> reply:
"""Helper function to simplify the syntax for next_layer events to this:
<< NextLayerHook(nl)
>> reply_next_layer(tutils.EchoLayer)
"""
def set_layer(next_layer: layer.NextLayer) -> None:
next_layer.layer = child_layer(next_layer.context)
return reply(*args, side_effect=set_layer, **kwargs)
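# A minimal usage sketch (hypothetical layer, hook, and context names,
# following the placeholder example above): drive a layer with a playbook,
# reply to its hooks, and assert on the commands it emits.
#
#   f = Placeholder(TCPFlow)
#   assert (
#       playbook(tcp.TCPLayer(tctx))
#       << TcpStartHook(f)                        # layer announces a new flow
#       >> reply()                                # unblock the hook
#       >> events.DataReceived(tctx.client, b"hi")
#       << commands.SendData(tctx.client, b"hi")
#   )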
|
|
# Copyright (c) 2015 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oslo_log import log as logging
import six
from six.moves import range
from congress.datalog import base
from congress.datalog import builtin
from congress.datalog import compile
from congress.datalog import unify
from congress.datalog import utility
LOG = logging.getLogger(__name__)
class TopDownTheory(base.Theory):
"""Class that holds the Top-Down evaluation routines.
Classes will inherit from this class if they want to import and specialize
those routines.
"""
class TopDownContext(object):
"""Struct for storing the search state of top-down evaluation."""
def __init__(self, literals, literal_index, binding, context, theory,
depth):
self.literals = literals
self.literal_index = literal_index
self.binding = binding
self.previous = context
self.theory = theory # a theory object, not just its name
self.depth = depth
def __str__(self):
return (
"TopDownContext<literals={}, literal_index={}, binding={}, "
"previous={}, theory={}, depth={}>").format(
"[" + ",".join([str(x) for x in self.literals]) + "]",
str(self.literal_index), str(self.binding),
str(self.previous), self.theory.name, str(self.depth))
class TopDownResult(object):
"""Stores a single result for top-down-evaluation."""
def __init__(self, binding, support):
self.binding = binding
self.support = support # for abduction
def __str__(self):
return "TopDownResult(binding={}, support={})".format(
unify.binding_str(self.binding), utility.iterstr(self.support))
class TopDownCaller(object):
"""Struct for info about the original caller of top-down evaluation.
VARIABLES is the list of variables (from the initial query)
that we want bindings for.
BINDING is the initially empty BiUnifier.
FIND_ALL controls whether just the first or all answers are found.
ANSWERS is populated by top-down evaluation: it is the list of
VARIABLES instances that the search process proved true.
"""
def __init__(self, variables, binding, theory,
find_all=True, save=None):
# an iterable of variable objects
self.variables = variables
# a bi-unifier
self.binding = binding
# the top-level theory (for included theories)
self.theory = theory
# a boolean
self.find_all = find_all
# The results of top-down-eval: a list of TopDownResults
self.results = []
# a Function that takes a compile.Literal and a unifier and
# returns T iff that literal under the unifier should be
# saved as part of an abductive explanation
self.save = save
# A variable used to store explanations as they are constructed
self.support = []
def __str__(self):
return (
"TopDownCaller<variables={}, binding={}, find_all={}, "
"results={}, save={}, support={}>".format(
utility.iterstr(self.variables), str(self.binding),
str(self.find_all), utility.iterstr(self.results),
repr(self.save), utility.iterstr(self.support)))
#########################################
# External interface
def __init__(self, name=None, abbr=None, theories=None, schema=None,
desc=None, owner=None):
super(TopDownTheory, self).__init__(
name=name, abbr=abbr, theories=theories, schema=schema,
desc=desc, owner=owner)
self.includes = []
def select(self, query, find_all=True):
"""Return list of instances of QUERY that are true.
If FIND_ALL is False, the return list has at most 1 element.
"""
assert compile.is_datalog(query), "Query must be atom/rule"
if compile.is_atom(query):
literals = [query]
else:
literals = query.body
# Because our output is instances of QUERY, need all the variables
# in QUERY.
bindings = self.top_down_evaluation(query.variables(), literals,
find_all=find_all)
# LOG.debug("Top_down_evaluation returned: %s", bindings)
if len(bindings) > 0:
self.log(query.tablename(), "Found answer %s",
"[" + ",".join([str(query.plug(x))
for x in bindings]) + "]")
return [query.plug(x) for x in bindings]
def explain(self, query, tablenames, find_all=True):
"""Return list of instances of QUERY that are true.
Same as select except stores instances of TABLENAMES
that participated in each proof. If QUERY is an atom,
returns list of rules with QUERY in the head and
the stored instances of TABLENAMES in the body; if QUERY is
a rule, the rules returned have QUERY's head in the head
and the stored instances of TABLENAMES in the body.
"""
# This is different than abduction because instead of replacing
# a proof attempt with saving a literal, we want to save a literal
# after a successful proof attempt.
assert False, "Not yet implemented"
def abduce(self, query, tablenames, find_all=True):
"""Compute additional literals.
Computes additional literals that if true would make
(some instance of) QUERY true. Returns a list of rules
where the head represents an instance of the QUERY and
the body is the collection of literals that must be true
in order to make that instance true. If QUERY is a rule,
each result is an instance of the head of that rule, and
the computed literals if true make the body of that rule
        (and hence the head) true. If FIND_ALL is False, the
        return list has at most one element.
Limitation: every negative literal relevant to a proof of
QUERY is unconditionally true, i.e. no literals are saved
when proving a negative literal is true.
"""
assert compile.is_datalog(query), "abduce requires a formula"
if compile.is_atom(query):
literals = [query]
output = query
else:
literals = query.body
output = query.head
# We need all the variables we will be using in the output, which
# here is just the head of QUERY (or QUERY itself if it is an atom)
abductions = self.top_down_abduction(
output.variables(), literals, find_all=find_all,
save=lambda lit, binding: lit.tablename() in tablenames)
results = [compile.Rule(output.plug(abd.binding), abd.support)
for abd in abductions]
self.log(query.tablename(), "abduction result:")
self.log(query.tablename(), "\n".join([str(x) for x in results]))
return results
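    # A worked sketch of the abduction above (hypothetical datalog data):
    # given the rule  p(x) :- q(x), r(x)  with q(1) known and tablenames
    # containing 'r', abducing p(x) yields roughly [p(1) :- r(1)],
    # i.e. "if r(1) were true, p(1) would follow".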
def consequences(self, filter=None, table_theories=None):
"""Return all the true instances of any table in this theory."""
# find all table, theory pairs defined in this theory
if table_theories is None:
table_theories = set()
for key in self.rules.keys():
table_theories |= set([(rule.head.table.table,
rule.head.table.service)
for rule in self.rules.get_rules(key)])
results = set()
# create queries: need table names and arities
# TODO(thinrichs): arity computation will need to ignore
# modals once we start using insert[p(x)] instead of p+(x)
for (table, theory) in table_theories:
if filter is None or filter(table):
tablename = compile.Tablename(table, theory)
arity = self.arity(tablename)
vs = []
for i in range(0, arity):
vs.append("x" + str(i))
vs = [compile.Variable(var) for var in vs]
tablename = table
if theory:
tablename = theory + ":" + tablename
query = compile.Literal(tablename, vs)
results |= set(self.select(query))
return results
def top_down_evaluation(self, variables, literals,
binding=None, find_all=True):
"""Compute bindings.
Compute all bindings of VARIABLES that make LITERALS
true according to the theory (after applying the unifier BINDING).
If FIND_ALL is False, stops after finding one such binding.
Returns a list of dictionary bindings.
"""
# LOG.debug("CALL: top_down_evaluation(vars=%s, literals=%s, "
# "binding=%s)",
# ";".join(str(x) for x in variables),
# ";".join(str(x) for x in literals),
# str(binding))
results = self.top_down_abduction(variables, literals,
binding=binding, find_all=find_all,
save=None)
# LOG.debug("EXIT: top_down_evaluation(vars=%s, literals=%s, "
# "binding=%s) returned %s",
# iterstr(variables), iterstr(literals),
# str(binding), iterstr(results))
return [x.binding for x in results]
def top_down_abduction(self, variables, literals, binding=None,
find_all=True, save=None):
"""Compute bindings.
Compute all bindings of VARIABLES that make LITERALS
true according to the theory (after applying the
unifier BINDING), if we add some number of additional
literals. Note: will not save any literals that are
needed to prove a negated literal since the results
would not make sense. Returns a list of TopDownResults.
"""
if binding is None:
binding = self.new_bi_unifier()
caller = self.TopDownCaller(variables, binding, self,
find_all=find_all, save=save)
if len(literals) == 0:
self._top_down_finish(None, caller)
else:
# Note: must use same unifier in CALLER and CONTEXT
context = self.TopDownContext(literals, 0, binding, None, self, 0)
self._top_down_eval(context, caller)
return list(set(caller.results))
#########################################
# Internal implementation
def _top_down_eval(self, context, caller):
"""Compute instances.
Compute all instances of LITERALS (from LITERAL_INDEX and above)
that are true according to the theory (after applying the
unifier BINDING to LITERALS).
Returns True if done searching and False otherwise.
"""
# no recursive rules, ever; this style of algorithm will not terminate
lit = context.literals[context.literal_index]
# LOG.debug("CALL: %s._top_down_eval(%s, %s)",
# self.name, context, caller)
# abduction
if caller.save is not None and caller.save(lit, context.binding):
self._print_call(lit, context.binding, context.depth)
# save lit and binding--binding may not be fully flushed out
# when we save (or ever for that matter)
caller.support.append((lit, context.binding))
self._print_save(lit, context.binding, context.depth)
success = self._top_down_finish(context, caller)
caller.support.pop() # pop in either case
if success:
return True
else:
self._print_fail(lit, context.binding, context.depth)
return False
# regular processing
if lit.is_negated():
# LOG.debug("%s is negated", lit)
# recurse on the negation of the literal
plugged = lit.plug(context.binding)
assert plugged.is_ground(), (
"Negated literal not ground when evaluated: " +
str(plugged))
self._print_call(lit, context.binding, context.depth)
new_context = self.TopDownContext(
[lit.complement()], 0, context.binding, None,
self, context.depth + 1)
new_caller = self.TopDownCaller(caller.variables, caller.binding,
caller.theory, find_all=False,
save=None)
# Make sure new_caller has find_all=False, so we stop as soon
# as we can.
# Ensure save=None so that abduction does not save anything.
# Saving while performing NAF makes no sense.
self._top_down_eval(new_context, new_caller)
if len(new_caller.results) > 0:
self._print_fail(lit, context.binding, context.depth)
return False # not done searching, b/c we failed
else:
# don't need bindings b/c LIT must be ground
return self._top_down_finish(context, caller, redo=False)
elif lit.tablename() == 'true':
self._print_call(lit, context.binding, context.depth)
return self._top_down_finish(context, caller, redo=False)
elif lit.tablename() == 'false':
self._print_fail(lit, context.binding, context.depth)
return False
elif lit.is_builtin():
return self._top_down_builtin(context, caller)
elif (self.theories is not None and
lit.table.service is not None and
lit.table.modal is None and # not a modal
lit.table.service != self.name and
not lit.is_update()): # not a pseudo-modal
return self._top_down_module(context, caller)
else:
return self._top_down_truth(context, caller)
def _top_down_builtin(self, context, caller):
"""Evaluate a table with a builtin semantics.
Returns True if done searching and False otherwise.
"""
lit = context.literals[context.literal_index]
self._print_call(lit, context.binding, context.depth)
built = builtin.builtin_registry.builtin(lit.table)
# copy arguments into variables
# PLUGGED is an instance of compile.Literal
plugged = lit.plug(context.binding)
# PLUGGED.arguments is a list of compile.Term
# create args for function
args = []
for i in range(0, built.num_inputs):
# save builtins with unbound vars during evaluation
if not plugged.arguments[i].is_object() and caller.save:
# save lit and binding--binding may not be fully flushed out
# when we save (or ever for that matter)
caller.support.append((lit, context.binding))
self._print_save(lit, context.binding, context.depth)
success = self._top_down_finish(context, caller)
caller.support.pop() # pop in either case
if success:
return True
else:
self._print_fail(lit, context.binding, context.depth)
return False
assert plugged.arguments[i].is_object(), (
("Builtins must be evaluated only after their "
"inputs are ground: {} with num-inputs {}".format(
                     str(plugged), built.num_inputs)))
args.append(plugged.arguments[i].name)
# evaluate builtin: must return number, string, or iterable
# of numbers/strings
try:
result = built.code(*args)
except Exception as e:
errmsg = "Error in builtin: " + str(e)
self._print_note(lit, context.binding, context.depth, errmsg)
self._print_fail(lit, context.binding, context.depth)
return False
# self._print_note(lit, context.binding, context.depth,
# "Result: " + str(result))
success = None
undo = []
if built.num_outputs > 0:
# with return values, local success means we can bind
# the results to the return value arguments
if (isinstance(result,
(six.integer_types, float, six.string_types))):
result = [result]
# Turn result into normal objects
result = [compile.Term.create_from_python(x) for x in result]
# adjust binding list
unifier = self.new_bi_unifier()
undo = unify.bi_unify_lists(result,
unifier,
lit.arguments[built.num_inputs:],
context.binding)
success = undo is not None
else:
# without return values, local success means
# result was True according to Python
success = bool(result)
if not success:
self._print_fail(lit, context.binding, context.depth)
unify.undo_all(undo)
return False
# otherwise, try to finish proof. If success, return True
if self._top_down_finish(context, caller, redo=False):
unify.undo_all(undo)
return True
# if fail, return False.
else:
unify.undo_all(undo)
self._print_fail(lit, context.binding, context.depth)
return False
def _top_down_module(self, context, caller):
"""Move to another theory and continue evaluation."""
# LOG.debug("%s._top_down_module(%s)", self.name, context)
lit = context.literals[context.literal_index]
if lit.table.service not in self.theories:
self._print_call(lit, context.binding, context.depth)
errmsg = "No such policy: %s" % lit.table.service
self._print_note(lit, context.binding, context.depth, errmsg)
self._print_fail(lit, context.binding, context.depth)
return False
return self.theories[lit.table.service]._top_down_eval(context, caller)
def _top_down_truth(self, context, caller):
"""Top down evaluation.
Do top-down evaluation over the root theory at which
the call was made and all the included theories.
"""
# return self._top_down_th(context, caller)
return self._top_down_includes(context, caller)
def _top_down_includes(self, context, caller):
"""Top-down evaluation of all the theories included in this theory."""
is_true = self._top_down_th(context, caller)
if is_true and not caller.find_all:
return True
for th in self.includes:
is_true = th._top_down_includes(context, caller)
if is_true and not caller.find_all:
return True
return False
def _top_down_th(self, context, caller):
"""Top-down evaluation for the rules in self."""
# LOG.debug("%s._top_down_th(%s)", self.name, context)
lit = context.literals[context.literal_index]
self._print_call(lit, context.binding, context.depth)
for rule in self.head_index(lit.table.table,
lit.plug(context.binding)):
unifier = self.new_bi_unifier()
self._print_note(lit, context.binding, context.depth,
"Trying %s" % rule)
# Prefer to bind vars in rule head
undo = self.bi_unify(self.head(rule), unifier, lit,
context.binding, self.name)
if undo is None: # no unifier
continue
if len(self.body(rule)) == 0:
if self._top_down_finish(context, caller):
unify.undo_all(undo)
if not caller.find_all:
return True
else:
unify.undo_all(undo)
else:
new_context = self.TopDownContext(
rule.body, 0, unifier, context, self, context.depth + 1)
if self._top_down_eval(new_context, caller):
unify.undo_all(undo)
if not caller.find_all:
return True
else:
unify.undo_all(undo)
self._print_fail(lit, context.binding, context.depth)
return False
def _top_down_finish(self, context, caller, redo=True):
"""Helper function.
This is called once top_down successfully completes
a proof for a literal. Handles (i) continuing search
for those literals still requiring proofs within CONTEXT,
(ii) adding solutions to CALLER once all needed proofs have
been found, and (iii) printing out Redo/Exit during tracing.
Returns True if the search is finished and False otherwise.
Temporary, transparent modification of CONTEXT.
"""
if context is None:
# Found an answer; now store it
if caller is not None:
# flatten bindings and store before we undo
# copy caller.support and store before we undo
binding = {}
for var in caller.variables:
binding[var] = caller.binding.apply(var)
result = self.TopDownResult(
binding, [support[0].plug(support[1], caller=caller)
for support in caller.support])
caller.results.append(result)
return True
else:
self._print_exit(context.literals[context.literal_index],
context.binding, context.depth)
# continue the search
if context.literal_index < len(context.literals) - 1:
context.literal_index += 1
finished = context.theory._top_down_eval(context, caller)
context.literal_index -= 1 # in case answer is False
else:
finished = self._top_down_finish(context.previous, caller)
# return search result (after printing a Redo if failure)
if redo and (not finished or caller.find_all):
self._print_redo(context.literals[context.literal_index],
context.binding, context.depth)
return finished
def _print_call(self, literal, binding, depth):
msg = "{}Call: %s".format("| " * depth)
self.log(literal.tablename(), msg, literal.plug(binding))
def _print_exit(self, literal, binding, depth):
msg = "{}Exit: %s".format("| " * depth)
self.log(literal.tablename(), msg, literal.plug(binding))
def _print_save(self, literal, binding, depth):
msg = "{}Save: %s".format("| " * depth)
self.log(literal.tablename(), msg, literal.plug(binding))
def _print_fail(self, literal, binding, depth):
msg = "{}Fail: %s".format("| " * depth)
self.log(literal.tablename(), msg, literal.plug(binding))
return False
def _print_redo(self, literal, binding, depth):
msg = "{}Redo: %s".format("| " * depth)
self.log(literal.tablename(), msg, literal.plug(binding))
return False
def _print_note(self, literal, binding, depth, msg):
self.log(literal.tablename(), "{}Note: {}".format("| " * depth,
msg))
#########################################
# Routines for specialization
@classmethod
def new_bi_unifier(cls, dictionary=None):
"""Return a unifier compatible with unify.bi_unify."""
return unify.BiUnifier(dictionary=dictionary)
# lambda (index):
# compile.Variable("x" + str(index)), dictionary=dictionary)
def defined_tablenames(self):
"""Returns list of table names defined in/written to this theory."""
raise NotImplementedError
def head_index(self, table, match_literal=None):
"""Return head index.
This routine must return all the formulas pertinent for
top-down evaluation when a literal with TABLE is at the top
of the stack.
"""
raise NotImplementedError
def head(self, formula):
"""Given the output from head_index(), return the formula head.
Given a FORMULA, return the thing to unify against.
Usually, FORMULA is a compile.Rule, but it could be anything
returned by HEAD_INDEX.
"""
raise NotImplementedError
def body(self, formula):
"""Return formula body.
Given a FORMULA, return a list of things to push onto the
top-down eval stack.
"""
raise NotImplementedError
def bi_unify(self, head, unifier1, body_element, unifier2, theoryname):
"""Unify atoms.
Given something returned by self.head HEAD and an element in
the return of self.body BODY_ELEMENT, modify UNIFIER1 and UNIFIER2
so that HEAD.plug(UNIFIER1) == BODY_ELEMENT.plug(UNIFIER2).
        Returns changes that can be undone via unify.undo_all.
THEORYNAME is the name of the theory for HEAD.
"""
return unify.bi_unify_atoms(head, unifier1, body_element, unifier2,
theoryname)
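    # A minimal sketch of how a concrete theory might fill in the
    # specialization hooks above (hypothetical storage; the real
    # subclasses live elsewhere in congress.datalog):
    #
    #   class SimpleRuleTheory(TopDownTheory):
    #       def __init__(self, *args, **kwargs):
    #           super(SimpleRuleTheory, self).__init__(*args, **kwargs)
    #           self._rules = {}   # tablename -> list of compile.Rule
    #       def head_index(self, table, match_literal=None):
    #           return self._rules.get(table, [])
    #       def head(self, formula):
    #           return formula.head
    #       def body(self, formula):
    #           return formula.body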
#########################################
# Routines for unknowns
def instances(self, rule, possibilities=None):
results = set([])
possibilities = possibilities or []
self._instances(rule, 0, self.new_bi_unifier(), results, possibilities)
return results
def _instances(self, rule, index, binding, results, possibilities):
"""Return all instances of the given RULE without evaluating builtins.
Assumes self.head_index returns rules with empty bodies.
"""
if index >= len(rule.body):
results.add(rule.plug(binding))
return
lit = rule.body[index]
self._print_call(lit, binding, 0)
# if already ground or a builtin, go to the next literal
if (lit.is_ground() or lit.is_builtin()):
self._instances(rule, index + 1, binding, results, possibilities)
return
# Otherwise, find instances in this theory
if lit.tablename() in possibilities:
options = possibilities[lit.tablename()]
else:
options = self.head_index(lit.tablename(), lit.plug(binding))
for data in options:
self._print_note(lit, binding, 0, "Trying: %s" % repr(data))
undo = unify.match_atoms(lit, binding, self.head(data))
if undo is None: # no unifier
continue
self._print_exit(lit, binding, 0)
# recurse on the rest of the literals in the rule
self._instances(rule, index + 1, binding, results, possibilities)
if undo is not None:
unify.undo_all(undo)
self._print_redo(lit, binding, 0)
self._print_fail(lit, binding, 0)
|
|
# Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
from test import test_support
import sys
class TestIsInstanceExceptions(unittest.TestCase):
# Test to make sure that an AttributeError when accessing the instance's
# class's bases is masked. This was actually a bug in Python 2.2 and
# 2.2.1 where the exception wasn't caught but it also wasn't being cleared
# (leading to an "undetected error" in the debug build). Set up is,
# isinstance(inst, cls) where:
#
# - inst isn't an InstanceType
# - cls isn't a ClassType, a TypeType, or a TupleType
# - cls has a __bases__ attribute
# - inst has a __class__ attribute
    # - inst.__class__ has no __bases__ attribute
#
# Sounds complicated, I know, but this mimics a situation where an
# extension type raises an AttributeError when its __bases__ attribute is
# gotten. In that case, isinstance() should return False.
def test_class_has_no_bases(self):
class I(object):
def getclass(self):
# This must return an object that has no __bases__ attribute
return None
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertEqual(False, isinstance(I(), C()))
# Like above except that inst.__class__.__bases__ raises an exception
# other than AttributeError
def test_bases_raises_other_than_attribute_error(self):
class E(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class I(object):
def getclass(self):
return E()
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Here's a situation where getattr(cls, '__bases__') raises an exception.
# If that exception is not AttributeError, it should not get masked
def test_dont_mask_non_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Like above, except that getattr(cls, '__bases__') raises an
# AttributeError, which /should/ get masked as a TypeError
def test_mask_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, isinstance, I(), C())
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
def test_dont_mask_non_attribute_error(self):
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(RuntimeError, issubclass, C(), S())
def test_mask_attribute_error(self):
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(TypeError, issubclass, C(), S())
# Like above, but test the second branch, where the __bases__ of the
# second arg (the cls arg) is tested. This means the first arg must
# return a valid __bases__, and it's okay for it to be a normal --
# unrelated by inheritance -- class.
def test_dont_mask_non_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, issubclass, B, C())
def test_mask_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
def __init__(self, bases):
self.bases = bases
def getbases(self):
return self.bases
__bases__ = property(getbases)
def __call__(self):
return AbstractInstance(self)
class AbstractInstance(object):
def __init__(self, klass):
self.klass = klass
def getclass(self):
return self.klass
__class__ = property(getclass)
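# A quick sketch of why the two helpers above work (the same semantics the
# tests below rely on): isinstance()/issubclass() only consult __class__
# and __bases__, so these duck-typed objects behave like classes:
#
#   Sup = AbstractClass(bases=())
#   Chd = AbstractClass(bases=(Sup,))
#   isinstance(Chd(), Sup)   # True: Chd().__class__ is Chd, and
#                            # Chd.__bases__ contains Sup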
# abstract classes
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))
# normal classes
class Super:
pass
class Child(Super):
pass
# new-style classes
class NewSuper(object):
pass
class NewChild(NewSuper):
pass
class TestIsInstanceIsSubclass(unittest.TestCase):
# Tests to ensure that isinstance and issubclass work on abstract
# classes and instances. Before the 2.2 release, TypeErrors were
# raised when boolean values should have been returned. The bug was
    # triggered by mixing 'normal' classes and instances with
# 'abstract' classes and instances. This case tries to test all
# combinations.
def test_isinstance_normal(self):
# normal instances
self.assertEqual(True, isinstance(Super(), Super))
self.assertEqual(False, isinstance(Super(), Child))
self.assertEqual(False, isinstance(Super(), AbstractSuper))
self.assertEqual(False, isinstance(Super(), AbstractChild))
self.assertEqual(True, isinstance(Child(), Super))
self.assertEqual(False, isinstance(Child(), AbstractSuper))
def test_isinstance_abstract(self):
# abstract instances
self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
self.assertEqual(False, isinstance(AbstractSuper(), Super))
self.assertEqual(False, isinstance(AbstractSuper(), Child))
self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractChild(), Super))
self.assertEqual(False, isinstance(AbstractChild(), Child))
def test_subclass_normal(self):
# normal classes
self.assertEqual(True, issubclass(Super, Super))
self.assertEqual(False, issubclass(Super, AbstractSuper))
self.assertEqual(False, issubclass(Super, Child))
self.assertEqual(True, issubclass(Child, Child))
self.assertEqual(True, issubclass(Child, Super))
self.assertEqual(False, issubclass(Child, AbstractSuper))
def test_subclass_abstract(self):
# abstract classes
self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
self.assertEqual(False, issubclass(AbstractSuper, Child))
self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
self.assertEqual(False, issubclass(AbstractChild, Super))
self.assertEqual(False, issubclass(AbstractChild, Child))
def test_subclass_tuple(self):
# test with a tuple as the second argument classes
self.assertEqual(True, issubclass(Child, (Child,)))
self.assertEqual(True, issubclass(Child, (Super,)))
self.assertEqual(False, issubclass(Super, (Child,)))
self.assertEqual(True, issubclass(Super, (Child, Super)))
self.assertEqual(False, issubclass(Child, ()))
self.assertEqual(True, issubclass(Super, (Child, (Super,))))
self.assertEqual(True, issubclass(NewChild, (NewChild,)))
self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
self.assertEqual(False, issubclass(NewChild, ()))
self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))
self.assertEqual(True, issubclass(int, (long, (float, int))))
if test_support.have_unicode:
self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring))))
def test_subclass_recursion_limit(self):
# make sure that issubclass raises RuntimeError before the C stack is
# blown
self.assertRaises(RuntimeError, blowstack, issubclass, str, str)
def test_isinstance_recursion_limit(self):
        # make sure that isinstance raises RuntimeError before the C stack is
# blown
self.assertRaises(RuntimeError, blowstack, isinstance, '', str)
def blowstack(fxn, arg, compare_to):
# Make sure that calling isinstance with a deeply nested tuple for its
# argument will raise RuntimeError eventually.
tuple_arg = (compare_to,)
if test_support.check_impl_detail(cpython=True):
RECURSION_LIMIT = sys.getrecursionlimit()
else:
# on non-CPython implementations, the maximum
# actual recursion limit might be higher, but
# probably not higher than 99999
#
RECURSION_LIMIT = 99999
for cnt in xrange(RECURSION_LIMIT+5):
tuple_arg = (tuple_arg,)
fxn(arg, tuple_arg)
def test_main():
test_support.run_unittest(
TestIsInstanceExceptions,
TestIsSubclassExceptions,
TestIsInstanceIsSubclass
)
if __name__ == '__main__':
test_main()
|
|
import argparse
import json
import logging
import os
import pprint
import random
import requests
import time
import ai
import config
def setup_args():
parser = argparse.ArgumentParser('Mech-AI Client')
parser.add_argument('-u', '--username', nargs=1)
parser.add_argument('-t', '--token', nargs=1)
args = parser.parse_args()
args.username = None if args.username is None else args.username[0]
args.token = None if args.token is None else args.token[0]
return args
def print_state(state):
state = json.loads(state) # TODO: Why doesn't this load the entire state? Is state not jsonified properly?
print('State:')
print('\tturn: {}/{}'.format(state['current_turn'], state['max_turns']))
print('\tplayers:')
for player in state['players'].values():
print('\t\tname: {}'.format(player['name']))
print('\t\tpos: {}'.format(player['pos']))
print('\t\torientation: {}'.format(player['orientation']))
print('\t\thealth: {}'.format(player['health']))
print('\t\tammo: {}'.format(player['ammo']))
print('\t\tscore: {}'.format(player['score']))
print('\tmap:\n{}'.format(state['map']))
def register_user(username):
""" Register a new username. """
logging.debug('Attempting to register username: {} ...'.format(username))
path = '/api/v1/users/register'
url = config.host + path
data = {'username': username}
r = requests.post(url, data=json.dumps(data))
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
logging.warn('Bad response: {}\n{}'.format(r.status_code, e))
return None
output = r.json()
username, access_token = output['username'], output['access_token']
if access_token is not None:
print('Registration successful!')
print('\tUsername: {}'.format(username))
print('\tAccess token: {}'.format(access_token))
else:
print('Registration failed.')
return username, access_token
def prompt_username():
""" Prompt user to enter a new username. """
while True:
ans = raw_input('Enter new username: ')
if ans == '':
print('Please enter a valid username')
else:
return ans
def prompt_switch_username(new_user, new_token, old_user, old_token):
""" Ask if we want to log in as our new user. """
while True:
ans = raw_input('Login as new username? (Y/n) ')
if ans.lower() in ('', 'y'):
return new_user, new_token
elif ans.lower() == 'n':
return old_user, old_token
else:
print('Please enter y(es) or n(o)')
def prompt_game_id(username, access_token):
""" Get input game ID. """
while True:
answer = raw_input('Enter game ID: ')
game_id = answer if answer else find_game(username, access_token)
if answer == 'q':
return None
try:
game_id = str(int(game_id)) # TODO: Are all game ID's ints?
        except (ValueError, TypeError):
print('Please enter a valid integer game ID')
else:
return game_id
def prompt_select_map():
""" Choose a valid map name. """
map_paths = query_maps_api()['maps']
map_names = [os.path.basename(map_path) for map_path in map_paths]
for i, map_name in enumerate(map_names, start=1):
print('\t{}. {}'.format(i, map_name))
while True:
answer = raw_input('Select a map: ')
if answer == '':
return map_names[0]
try:
return map_names[int(answer) - 1]
except ValueError:
print('Please enter a number')
except IndexError:
print('Please select one of the above')
def prompt_create_game(username):
""" Gather inputs required to create a game. """
answer = raw_input('Enter game name: ')
name = 'Mech AI' if answer == '' else answer
map_name = prompt_select_map()
while True:
answer = raw_input('Enter number of rounds: ')
try:
rounds = 100 if answer == '' else int(answer)
break
except ValueError:
print('Please enter a number')
answer = raw_input('Enter list of player usernames: ')
players = [username] if answer == '' else [x.strip() for x in answer.split()]
return name, map_name, rounds, players
def create_game(username, access_token, name, players, rounds):
""" Create a new game. """
logging.debug('Attempting to create game...')
path = '/api/v1/games/create'
url = config.host + path
headers = {
'Username': username,
'Access-Token': access_token
}
data = json.dumps({
'name': name,
'players': players,
'rounds': rounds,
})
r = requests.post(url, headers=headers, data=data)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
logging.warn('Bad response: {}\n{}'.format(r.status_code, e))
return None
output = r.json()
logging.debug('\t' + output['message'])
game_id, players = output['id'], output['players']
if game_id is not None:
print('\tGame ID: {}'.format(game_id))
print('\tName: {}'.format(name))
print('\tPlayers: {}'.format(', '.join(players)))
print('\tRounds: {}'.format(rounds))
return game_id
def query_maps_api():
""" Post data to our game and get JSON response. """
path = '/api/v1/maps/'
url = config.host + path
r = requests.get(url)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
logging.warn('Bad response: {}\n{}'.format(r.status_code, e))
return None
return r.json()
def find_game(username, access_token):
""" Attempt to locate a game to play, for the given username. """
logging.debug('Attempting to find a game...')
# TODO: Why are we not using our /api/v1/games/?user=x route?
# TODO: This games/find seems redundant
path = '/api/v1/games/find'
url = config.host + path
headers = {
'username': username,
'access_token': access_token
}
r = requests.get(url, headers=headers)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
logging.warn('Bad response: {}\n{}'.format(r.status_code, e))
return None
output = r.json()
logging.debug('\t' + output['message'])
game_id, players = output['id'], output['players']
if game_id is not None:
print('\tGame ID: {}'.format(game_id))
print('\tPlayers: {}'.format(', '.join(players)))
return game_id
def post_to_game(url, headers, data):
""" Post data to our game and get JSON response. """
r = requests.post(url, headers=headers, data=json.dumps(data))
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
logging.warn('Bad response: {}\n{}'.format(r.text, e))
return None
else:
return r.json()
def join_game(url, headers, data):
""" Send messages to join the game until the game starts. """
logging.debug('Attempting to join game...')
data['message'] = 'join'
while True:
output = post_to_game(url, headers, data)
message = output['message']
logging.debug('\tMessage: ' + message)
if message == 'Game started':
return True
elif message == 'Game complete':
return False
else:
time.sleep(0.5)
def await_turn(url, headers, data):
""" Request state until it is returned (meaning it is our turn) """
logging.debug('Awaiting my turn...')
data['message'] = 'status'
while True: # Play until game ends
logging.debug('Posting status:\n{}'.format(data))
output = post_to_game(url, headers, data)
if output is None:
logging.error('Received None output from server')
return None, None
elif output['message'] == 'Game complete':
print('Message: Game complete')
return 'Game complete', output['state']
elif output['message'] == 'Not your turn':
print('Message: Not your turn')
time.sleep(0.25)
elif output['message'] == 'Your turn':
print('Message: Your turn')
return 'Your turn', output['state']
def make_moves(url, headers, data):
logging.debug('Attempting to make moves...')
while True: # Play until game ends
message, state = await_turn(url, headers, data) # Poll until we get a state (it's our turn)
        if message is None:  # Server error; await_turn already logged it
return
if message == 'Game complete': # TODO: this is horrible. Do printing elsewhere.
print_state(state)
return
print_state(state)
data['message'] = 'move' # Making a move
data['move'] = ai.make_move(state) # Determine move
print('Posting move: <{}>'.format(data['move']))
output = post_to_game(url, headers, data)
if output is None:
logging.error('Received <None> output from server')
return
del data['move'] # Erase contents each turn
del data['message']
print('Message: {}'.format(output['message']))
if output['message'] == 'Game complete':
return
elif output['message'] in ('Move accepted', 'Move rejected'):
continue
def play_game(game_id, username, access_token):
""" Get a game ID for an existing game, if you are a listed player. """
logging.debug('Attempting to play game...')
url = config.host + '/api/v1/games/play'
headers = {
'username': username,
'access_token': access_token
}
data = {'game_id': game_id}
if join_game(url, headers, data):
make_moves(url, headers, data)
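# A minimal sketch of driving the protocol directly (hypothetical game ID
# and credentials; main() below normally gathers these interactively):
#
#   play_game('42', 'alice', 'secret-token')
#
# join_game() polls with 'join' until the server answers 'Game started',
# then make_moves() alternates 'status' polls with 'move' posts until the
# server answers 'Game complete'.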
def main():
args = setup_args()
username = args.username if args.username else config.username
access_token = args.token if args.token else config.access_token
while True:
print('\nLogged in as <{}>'.format(username))
print('Choose an option:')
print('\t1. Register new username')
print('\t2. Create new game')
print('\t3. Join existing game')
print('\t4. Quit')
ans = raw_input('\t> ')
if ans == '1':
new_username, new_token = register_user(prompt_username())
if new_username is not None:
username, access_token = prompt_switch_username(new_username, new_token, username, access_token)
elif ans == '2':
            name, map_name, rounds, players = prompt_create_game(username)
game_id = create_game(username, access_token, name=name, players=players, rounds=rounds)
elif ans == '3':
game_id = prompt_game_id(username, access_token)
if game_id:
play_game(game_id, username, access_token)
elif ans == '4':
print('Thanks for playing!\n')
break
else:
print('Please choose a valid option (1-4)')
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""
abm.xypops
~~~~~~~~~~
Environments not backed by networkx whose x, y traits are used in visualization
"""
from scipy.stats.distributions import norm
from scipy.stats.distributions import uniform
from sklearn.metrics.pairwise import euclidean_distances
from abm.viz import display_network
from abm.pops import Environment
from abm.entities import XyEntity
import numpy as np
from random import choice
Y_DIST = norm(300, 10)
CLUSTER_X_DIST_MAP = {
'A': uniform(0, 50),
'B': uniform(30, 50),
'C': uniform(60, 50)
}
CLUSTER_SIZES = {
'A': 8,
'B': 10,
'C': 8
}
def make_points(cluster, size, y_dist, x_dist):
"""Creates a set of points using y_dist and x_dist to draw the location."""
ys = y_dist.rvs(size)
xs = x_dist.rvs(size)
return list(zip(xs, ys, [cluster] * size))
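# A small sketch of make_points (illustrative numbers; rvs() draws are
# random):
#
#   make_points('A', 2, Y_DIST, CLUSTER_X_DIST_MAP['A'])
#   # -> [(12.3, 301.8, 'A'), (44.0, 296.5, 'A')]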
class XyEnvironment(Environment):
"""
A set of connected Entities. Handles message passing and displaying.
Entities are connected randomly.
"""
def __init__(self, y_pos_dist=Y_DIST, cluster_x_dists=CLUSTER_X_DIST_MAP,
cluster_sizes=CLUSTER_SIZES, single_component=True,
entity_class=XyEntity, **kwargs):
super(XyEnvironment, self).__init__(**kwargs)
self.population = []
self.connectivity_matrix = None
self.connected_components = []
self.node_component_map = {}
self.entity_class = entity_class
self._set_entities(y_pos_dist, cluster_x_dists, cluster_sizes)
self._set_connectivity_matrix()
self._set_connections()
if single_component:
self._ensure_single_component()
def _set_entities(self, y_pos_dist, cluster_x_dists, cluster_sizes):
point_args = []
        for cluster, size in cluster_sizes.items():
point_args += make_points(cluster, size,
y_pos_dist, cluster_x_dists[cluster])
for ix, (x, y, cluster) in enumerate(point_args):
pt = self.entity_class(environment=self, index=ix, x=x, y=y, cluster=cluster)
self.population.append(pt)
self.size = len(self.population)
def _set_connections(self, track_components=True):
"""Initializes each Entity's adjacency list.
:param track_components: Flag for tracking connected components during graph construction
"""
for index, point in enumerate(self.population):
# make set of connections to indices; np.where returns a tuple
adjacencies = set(np.where(self.connectivity_matrix[index] > 0)[0])
adjacencies.discard(index)
# pass adjacency information down to agent
point.set_adjacencies(adjacencies)
if track_components:
# track connected components as we construct edges
if index in self.node_component_map:
component = self.node_component_map[index]
else:
component = set([index])
self.node_component_map[index] = component
self.connected_components.append(component)
# update the component in place with potential new members
component.update(adjacencies)
# update the node - component map so we can fetch this object
# for adjacencies
self.node_component_map.update(
{a: component for a in adjacencies})
# resolve potential component connections
self._resolve_components(component)
n = float(len(self.population))
k = float(np.sum(self.connectivity_matrix)) / 2
self.edge_density = k / (n * (n - 1) / 2)
def _ensure_single_component(self):
"""
Iterate through disjoint component list, adding connections between sequential components
Update other datastructures to reflect the new connections
"""
for ix, component in enumerate(self.connected_components[:-1]):
start, end = (choice(list(component)), choice(
list(self.connected_components[ix + 1])))
self.population[start].adjacencies.append(end)
self.population[end].adjacencies.append(start)
self.connectivity_matrix[start][end] = True
self.connectivity_matrix[end][start] = True
self.connected_components[ix].add(end)
self.connected_components[ix + 1].add(start)
self._resolve_components(self.connected_components[0])
def _resolve_components(self, component):
"""
Find components thought to be separate that now have intersections
Condense these and set self.connected_components to be a list of disjoint sets
"""
resolved_components = [component]
for other_component in self.connected_components:
if other_component.intersection(component) or other_component is component:
component.update(other_component)
self.node_component_map.update(
{a: component for a in other_component})
else:
resolved_components.append(other_component)
self.connected_components = resolved_components
def _set_connectivity_matrix(self):
"""
        Computes the connectivity matrix of this Environment. Each pair of
        points is connected at random (symmetric, no self-loops).
"""
if self.connectivity_matrix is not None:
return
# generate a random symmetric matrix
point_count = len(self.population)
matrix = np.random.randint(
0, 2, point_count ** 2).reshape(point_count, point_count)
        matrix = (matrix + matrix.T) // 2
for i in range(point_count):
matrix[i][i] = 0
self.connectivity_matrix = matrix
def display(self, current=None, target=None):
"""
Plots the state of the task. If <show> = False, doesn't plot
anything and the simulation can run faster.
"""
if not self.show:
return
display_network(self.population, self.connectivity_matrix,
current=current, target=target)
class CappedPreferentialEnvironment(XyEnvironment):
"""
A set of connected Entities. Handles message passing and displaying. Connections are laid
out such that entities of the same cluster are more likely to be tied together,
proportionally to a parameter alpha. The overall density of the network is controlled
by a parameter beta.
"""
def __init__(self, alpha=0.8, beta=0.4, *args, **kwargs):
self.alpha = alpha
self.beta = beta
super(CappedPreferentialEnvironment, self).__init__(*args, **kwargs)
def _set_connectivity_matrix(self):
"""
        Computes the connectivity matrix of this Environment. Ties are drawn
        at random, favoring same-cluster pairs (see decide_connection below).
"""
if self.connectivity_matrix is not None:
return
def decide_connection(point1, point2):
# A point is connected to another point of its same cluster
# with high probability proportional to alpha, and to
            # another point of a different cluster with probability
# proportional to 1 - alpha.
# Moreover, the edge density of a network is capped at a value
# beta. That's why we choose a 0 with probability 1-beta,
# and partition beta into alpha and 1-alpha.
alpha = self.alpha
beta = self.beta
if point1.cluster == point2.cluster:
tie = np.random.choice(
[0, 0, 1], p=[1 - beta, beta * (1 - alpha), beta * alpha])
else:
tie = np.random.choice(
[0, 0, 1], p=[1 - beta, beta * alpha, beta * (1 - alpha)])
return tie
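        # Worked example with the defaults alpha=0.8, beta=0.4: a
        # same-cluster pair gets a tie with probability
        # beta * alpha = 0.32, a cross-cluster pair with
        # beta * (1 - alpha) = 0.08; the remaining mass yields 0.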
matrix = np.array([[0] * len(self.population)
for _ in range(len(self.population))])
# since the graph is undirected, the matrix is symmetric,
# which in turn means we need only compute the lower triangular
# elements and then copy them into the upper triangular elements
for i, point1 in enumerate(self.population):
for j, point2 in enumerate(self.population[:i]):
matrix[i][j] = decide_connection(point1, point2)
matrix[j][i] = matrix[i][j]
self.connectivity_matrix = matrix
class NearestNeighborsEnvironment(XyEnvironment):
"""
A set of connected Entities. Handles message passing and displaying. Connections laid
out geographically: each point is connected to some of its nearest neighbors.
"""
def _set_connectivity_matrix(self):
"""
        Computes the connectivity matrix of this Environment. Each point is
        connected to every other point within a shared cutoff distance.
"""
if self.connectivity_matrix is not None:
return
points_arr = np.array([[p.x, p.y] for p in self.population])
distance_mat = euclidean_distances(points_arr, points_arr)
# Every point p will be connected to each other point whose distance
# to p is less than a cut-off value. This value is computed as the
# mean of {min_nonzero(dist_mat(p)) | p is a point}, times a factor
def min_nonzero(r):
return min(r[r > 0])
# apply_along_axis(f, axis=1, arr) applies f to each row
min_neighbor_distances = np.apply_along_axis(
min_nonzero, axis=1, arr=distance_mat)
factor = 2.2
neighbor_cutoff = np.mean(min_neighbor_distances) * factor
connectivity_matrix = distance_mat < neighbor_cutoff
self.connectivity_matrix = connectivity_matrix
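# A worked sketch of the cutoff rule above (hypothetical distances): if the
# per-point nearest-neighbor distances are [4.0, 5.0, 6.0], their mean is
# 5.0, so with factor = 2.2 every pair closer than 11.0 is connected.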
|
|
"""Docker Sproc
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import socket
import sys
import time
import click
import docker
import six
from treadmill import cli
from treadmill import exc
from treadmill import utils
from treadmill import supervisor
from treadmill.appcfg import abort as app_abort
_LOGGER = logging.getLogger(__name__)
def _read_environ(envdirs):
"""Read a list of environ directories and return a full envrion ``dict``.
:returns:
``dict`` - Environ dictionary.
"""
environ = {}
for envdir in envdirs:
environ.update(supervisor.read_environ_dir(envdir))
return environ
def _get_image_user(image_attrs):
"""User is in Config data
"""
config = image_attrs.get('Config', {})
return config.get('User', None)
def _create_container(client, name, image_name, cmd, **args):
"""Create docker container from given app.
"""
# if success, pull returns an image object
image = client.images.pull(image_name)
container_args = {
'name': name,
'image': image_name,
'command': list(cmd),
'detach': True,
'stdin_open': True,
'tty': True,
'network_mode': 'host',
'pid_mode': 'host',
'ipc_mode': 'host',
# XXX: uts mode not supported by python lib yet
# 'uts_mode': 'host',
}
# assign user argument
user = _get_image_user(image.attrs)
if user is None or user == '':
uid = os.getuid()
gid = os.getgid()
container_args['user'] = '{}:{}'.format(uid, gid)
    # add additional container args
for key, value in six.iteritems(args):
if value is not None:
container_args[key] = value
try:
# The container might exist already
# TODO: start existing container with different ports
container = client.containers.get(name)
container.remove(force=True)
except docker.errors.NotFound:
pass
_LOGGER.info('Run docker: %r', container_args)
return client.containers.create(**container_args)
def _transform_volumes(volumes):
"""Transform volume mapping from list to dict regconized by docker lib
"""
dict_volume = {}
for volume in volumes:
# Example format:
# /var/tmp:/dest_var_tmp:rw => {
# /var/tmp': {
# 'bind': '/dest_var_tmp',
# 'mode': 'rw
# }
# }
(target, source, mode) = volume.split(':', 2)
dict_volume[target] = {'bind': source, 'mode': mode}
return dict_volume
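# A small sketch of the transformation above (same example as the inline
# comment):
#
#   _transform_volumes(['/var/tmp:/dest_var_tmp:rw'])
#   # -> {'/var/tmp': {'bind': '/dest_var_tmp', 'mode': 'rw'}}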
class DockerSprocClient(object):
"""Docker Treadmill Sproc client
"""
__slots__ = (
'client',
'param',
'tm_env',
)
def __init__(self, param=None):
self.client = None
if param is None:
self.param = {}
else:
self.param = param
# wait for dockerd ready
time.sleep(1)
def _get_client(self):
"""Gets the docker client.
"""
if self.client is not None:
return self.client
self.client = docker.from_env(**self.param)
return self.client
def run(self, name, image, cmd, **args):
"""Run
"""
client = self._get_client()
try:
if 'volumes' in args:
args['volumes'] = _transform_volumes(args['volumes'])
if 'envdirs' in args:
args['environment'] = _read_environ(args.pop('envdirs'))
container = _create_container(
client, name, image, cmd, **args
)
except docker.errors.ImageNotFound:
raise exc.ContainerSetupError(
'Image {0} was not found'.format(image),
app_abort.AbortedReason.IMAGE
)
container.start()
container.reload()
logs_gen = container.logs(
stdout=True,
stderr=True,
stream=True,
follow=True
)
_LOGGER.info('Container %s is running', name)
while container.status == 'running':
try:
for log_lines in logs_gen:
sys.stderr.write(log_lines)
except socket.error:
pass
container.reload()
rc = container.wait()
if os.WIFSIGNALED(rc):
# Process died with a signal in docker
sig = os.WTERMSIG(rc)
os.kill(os.getpid(), sig)
else:
utils.sys_exit(os.WEXITSTATUS(rc))
def init():
"""Top level command handler."""
@click.command()
@click.option('--name', required=True, help='name of container')
@click.option('--image', required=True, help='container image')
@click.argument('cmd', nargs=-1)
@click.option('--user', required=False,
help='userid in the form UID:GID')
@click.option('--envdirs', type=cli.LIST, required=False, default='',
help='List of environ directory to pass into the container.')
@click.option('--read-only', is_flag=True, default=False,
help='Mount the docker image read-only')
@click.option('--volume', multiple=True, required=False,
help='Specify each volume as TARGET:SOURCE:MODE')
def configure(name, image, cmd, user, envdirs, read_only, volume):
"""Configure local manifest and schedule app to run."""
service_client = DockerSprocClient()
service_client.run(
            # mandatory parameters
name, image, cmd,
# optional parameters
user=user,
envdirs=envdirs,
read_only=read_only,
volumes=volume,
)
return configure
|
|
#!/usr/bin/env python3
from fnmatch import fnmatch
from glob import glob
from logging import debug, info, error
from os import path
import contextlib
from distutils import spawn, sysconfig
import os
import shutil
import site
import subprocess
import sys
import tarfile
import tempfile
from urllib.request import urlopen
import zipfile
VARS = {}
def setvar(**kwargs):
for key, item in kwargs.items():
VARS[key] = item.format(**VARS)
def getvar(key):
return VARS.get(key, None)
def fill_in(value):
if type(value) == str:
return value.format(**VARS)
return value
def fill_in_args(fn):
def wrapper(*args, **kwargs):
args = list(fill_in(arg) for arg in args)
kwargs = dict((key, fill_in(value)) for key, value in kwargs.items())
return fn(*args, **kwargs)
return wrapper
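# A small sketch of the VARS templating above (hypothetical values):
#
#   setvar(top='/work', build='{top}/build')
#   fill_in('{build}/stamps')   # -> '/work/build/stamps'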
def flatten(*args):
queue = list(args)
while queue:
item = queue.pop(0)
if type(item) == list:
queue = item + queue
elif type(item) == tuple:
queue = list(item) + queue
else:
yield item
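# flatten() splices nested lists/tuples left-to-right, e.g.:
#
#   list(flatten(1, [2, (3, 4)], 5))   # -> [1, 2, 3, 4, 5]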
chdir = fill_in_args(os.chdir)
path.exists = fill_in_args(path.exists)
path.join = fill_in_args(path.join)
path.relpath = fill_in_args(path.relpath)
@fill_in_args
def panic(*args):
error(*args)
sys.exit(1)
@fill_in_args
def topdir(name):
if not path.isabs(name):
name = path.abspath(name)
return path.relpath(name, '{top}')
@fill_in_args
def find_executable(name):
return (spawn.find_executable(name) or
panic('Executable "%s" not found!', name))
@fill_in_args
def find(root, **kwargs):
only_files = kwargs.get('only_files', False)
include = kwargs.get('include', ['*'])
exclude = kwargs.get('exclude', [''])
lst = []
for name in sorted(os.listdir(root)):
fullname = path.join(root, name)
is_dir = path.isdir(fullname)
excluded = any(fnmatch(name, pat) for pat in exclude)
included = any(fnmatch(name, pat) for pat in include)
if included and not excluded:
if not (is_dir and only_files):
lst.append(fullname)
if is_dir and not excluded:
lst.extend(find(fullname, **kwargs))
return lst
@fill_in_args
def touch(name):
try:
os.utime(name, None)
    except OSError:
open(name, 'a').close()
@fill_in_args
def mkdtemp(**kwargs):
if 'dir' in kwargs and not path.isdir(kwargs['dir']):
mkdir(kwargs['dir'])
return tempfile.mkdtemp(**kwargs)
@fill_in_args
def mkstemp(**kwargs):
if 'dir' in kwargs and not path.isdir(kwargs['dir']):
mkdir(kwargs['dir'])
return tempfile.mkstemp(**kwargs)
@fill_in_args
def rmtree(*names):
for name in flatten(names):
if path.isdir(name):
debug('rmtree "%s"', topdir(name))
shutil.rmtree(name)
@fill_in_args
def remove(*names):
for name in flatten(names):
if path.isfile(name):
debug('remove "%s"', topdir(name))
os.remove(name)
@fill_in_args
def mkdir(*names):
for name in flatten(names):
if name and not path.isdir(name):
debug('makedir "%s"', topdir(name))
os.makedirs(name)
@fill_in_args
def copy(src, dst):
debug('copy "%s" to "%s"', topdir(src), topdir(dst))
shutil.copy2(src, dst)
@fill_in_args
def copytree(src, dst, **kwargs):
debug('copytree "%s" to "%s"', topdir(src), topdir(dst))
mkdir(dst)
for name in find(src, **kwargs):
prefix = path.join(dst, path.relpath(name, src))
if path.isdir(name):
mkdir(prefix)
else:
copy(name, prefix)
@fill_in_args
def move(src, dst):
debug('move "%s" to "%s"', topdir(src), topdir(dst))
shutil.move(src, dst)
@fill_in_args
def symlink(src, name):
if not path.islink(name):
debug('symlink "%s" points at "%s"', topdir(name), src)
os.symlink(src, name)
@fill_in_args
def chmod(name, mode):
debug('change permissions on "%s" to "%o"', topdir(name), mode)
os.chmod(name, mode)
@fill_in_args
def execute(*cmd):
debug('execute "%s"', " ".join(cmd))
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as ex:
panic('command "%s" failed with %d', " ".join(list(ex.cmd)), ex.returncode)
@fill_in_args
def textfile(*lines):
f, name = mkstemp(dir='{tmpdir}')
debug('creating text file script "%s"', topdir(name))
    os.write(f, bytes('\n'.join(lines) + '\n', encoding='utf-8'))
os.close(f)
return name
@fill_in_args
def download(url, name):
info('download "%s" to "%s"', url, topdir(name))
u = urlopen(url)
meta = u.info()
    try:
        size = int(meta['Content-Length'])
    except (TypeError, ValueError):
        size = None
if size:
info('download: %s (size: %d)', name, size)
else:
info('download: %s', name)
with open(name, 'wb') as f:
done = 0
block = 8192
while True:
buf = u.read(block)
if not buf:
break
done += len(buf)
f.write(buf)
if size:
status = r"%d [%3.2f%%]" % (done, done * 100. / size)
else:
status = r"%d" % done
status = status + chr(8) * (len(status) + 1)
sys.stdout.write(status)
sys.stdout.flush()
@fill_in_args
def unarc(name):
info('extract files from "%s"', topdir(name))
if any(name.endswith(ext) for ext in ['.tar.gz', '.tar.bz2', '.tar.xz']):
with tarfile.open(name) as arc:
for item in arc.getmembers():
debug('extract "%s"' % item.name)
arc.extract(item)
elif name.endswith('.zip'):
with zipfile.ZipFile(name) as arc:
for item in arc.infolist():
debug('extract "%s"' % item.filename)
arc.extract(item)
else:
        raise RuntimeError('Unrecognized archive: "%s"' % name)
@fill_in_args
def find_site_dir(dirname):
prefix = sysconfig.EXEC_PREFIX
destlib = sysconfig.get_config_var('DESTLIB')
return path.join(dirname, destlib[len(prefix) + 1:], 'site-packages')
@fill_in_args
def add_site_dir(dirname):
dirname = find_site_dir(dirname)
info('adding "%s" to python site dirs', topdir(dirname))
site.addsitedir(dirname)
@contextlib.contextmanager
def cwd(name):
old = os.getcwd()
if not path.exists(name):
mkdir(name)
try:
debug('enter directory "%s"', topdir(name))
chdir(name)
yield
finally:
chdir(old)
@contextlib.contextmanager
def env(**kwargs):
backup = {}
try:
for key, value in kwargs.items():
debug('changing environment variable "%s" to "%s"', key, value)
old = os.environ.get(key, None)
os.environ[key] = fill_in(value)
backup[key] = old
yield
finally:
for key, value in backup.items():
debug('restoring old value of environment variable "%s"', key)
if value is None:
del os.environ[key]
else:
os.environ[key] = value
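# Usage sketch (hypothetical variables): override environment variables for
# the duration of a block, restoring the previous values afterwards:
#   with env(CC='gcc', CFLAGS='-O2'):
#       execute('make')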
def recipe(name, nargs=0):
def real_decorator(fn):
@fill_in_args
def wrapper(*args, **kwargs):
target = [str(arg) for arg in args[:min(nargs, len(args))]]
if len(target) > 0:
target = [target[0], fill_in(name)] + target[1:]
target = '-'.join(target)
else:
target = fill_in(name)
target = target.replace('_', '-')
target = target.replace('/', '-')
stamp = path.join('{stamps}', target)
if not path.exists('{stamps}'):
mkdir('{stamps}')
if not path.exists(stamp):
fn(*args, **kwargs)
touch(stamp)
else:
info('already done "%s"', target)
return wrapper
return real_decorator
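# A recipe runs at most once: it is skipped when its stamp file already exists
# under '{stamps}'. With nargs > 0 the leading positional arguments are folded
# into the stamp name, e.g. (hypothetical target) unpack('gcc') would leave
# the stamp '{stamps}/gcc-unpack' behind on success.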
@recipe('python-setup', 1)
def python_setup(name, **kwargs):
dest_dir = kwargs.get('dest_dir', '{host}')
with cwd(path.join('{build}', name)):
execute(sys.executable, 'setup.py', 'build')
execute(sys.executable, 'setup.py', 'install', '--prefix=' + dest_dir)
@recipe('fetch', 1)
def fetch(name, url):
if url.startswith('http') or url.startswith('ftp'):
if not path.exists(name):
download(url, name)
else:
info('File "%s" already downloaded.', name)
elif url.startswith('svn'):
if not path.exists(name):
execute('svn', 'checkout', url, name)
else:
with cwd(name):
execute('svn', 'update')
elif url.startswith('git'):
if not path.exists(name):
execute('git', 'clone', url, name)
else:
with cwd(name):
execute('git', 'pull')
elif url.startswith('file'):
if not path.exists(name):
_, src = url.split('://')
copy(src, name)
else:
panic('URL "%s" not recognized!', url)
@recipe('unpack', 1)
def unpack(name, work_dir='{sources}'):
try:
src = glob(path.join('{archives}', name) + '*')[0]
except IndexError:
panic('Missing files for "%s".', name)
info('preparing files for "%s"', name)
with cwd(work_dir):
unarc(src)
@recipe('patch', 1)
def patch(name, work_dir='{sources}'):
with cwd(work_dir):
for name in find(path.join('{patches}', name),
only_files=True, exclude=['*~']):
if fnmatch(name, '*.diff'):
execute('patch', '-t', '-p0', '-i', name)
else:
dst = path.relpath(name, '{patches}')
mkdir(path.dirname(dst))
copy(name, dst)
@recipe('configure', 1)
def configure(name, *confopts, **kwargs):
info('configuring "%s"', name)
if 'from_dir' in kwargs:
from_dir = kwargs['from_dir']
else:
from_dir = path.join('{sources}', name)
if kwargs.get('copy_source', False):
rmtree(path.join('{build}', name))
copytree(path.join('{sources}', name), path.join('{build}', name))
from_dir = '.'
with cwd(path.join('{build}', name)):
remove(find('.', include=['config.cache']))
execute(path.join(from_dir, 'configure'), *confopts)
@recipe('make', 2)
def make(name, target=None, makefile=None, **makevars):
info('running make "%s"', target)
with cwd(path.join('{build}', name)):
args = ['--jobs={nproc}']
if target is not None:
args = [target] + args
if makefile is not None:
args = ['-f', makefile] + args
        args += ['%s=%s' % item for item in makevars.items()]
execute('make', *args)
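# Usage sketch (hypothetical values): make('binutils', 'install',
# DESTDIR='{prefix}') runs `make install --jobs={nproc} DESTDIR=...`
# inside '{build}/binutils'.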
def require_header(headers, lang='c', errmsg='', symbol=None, value=None):
debug('require_header "%s"', headers[0])
for header in headers:
cmd = {'c': os.environ['CC'], 'c++': os.environ['CXX']}[lang]
cmd = fill_in(cmd).split()
opts = ['-fsyntax-only', '-x', lang, '-']
proc = subprocess.Popen(cmd + opts,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc_stdin = ['#include <%s>' % header]
if symbol:
if value:
proc_stdin.append("#if %s != %s" % (symbol, value))
else:
proc_stdin.append("#ifndef %s" % symbol)
proc_stdin.append("#error")
proc_stdin.append("#endif")
proc_stdin = bytes('\n'.join(proc_stdin), encoding='utf-8')
proc_stdout, proc_stderr = proc.communicate(proc_stdin)
proc.wait()
if proc.returncode == 0:
return
panic(errmsg)
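# Usage sketch for require_header (hypothetical header/symbol): it compiles a
# one-line program with '$CC -fsyntax-only' to probe for a header and symbol:
#   require_header(['ncurses.h'], lang='c', symbol='NCURSES_VERSION',
#                  errmsg='ncurses development headers required')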
__all__ = ['setvar', 'getvar', 'panic', 'find_executable', 'chmod', 'execute',
'rmtree', 'mkdir', 'copy', 'copytree', 'unarc', 'fetch', 'cwd',
'symlink', 'remove', 'move', 'find', 'textfile', 'env', 'path',
'add_site_dir', 'find_site_dir', 'python_setup', 'recipe',
'unpack', 'patch', 'configure', 'make', 'require_header', 'touch']
|
|
import sys
import unittest
import decimal
import os.path
from datetime import datetime
from pyorient import PyOrientCommandException, PyOrientSQLParsingException
from pyorient.ogm import Graph, Config
from pyorient.groovy import GroovyScripts
from pyorient.ogm.declarative import declarative_node, declarative_relationship
from pyorient.ogm.property import (
String, Date, DateTime, Decimal, Double, Integer, EmbeddedMap, EmbeddedSet,
Link, UUID)
from pyorient.ogm.what import expand, in_, out, distinct, sysdate
AnimalsNode = declarative_node()
AnimalsRelationship = declarative_relationship()
class Animal(AnimalsNode):
element_type = 'animal'
element_plural = 'animals'
name = String(nullable=False, unique=True)
species = String(nullable=False)
class Food(AnimalsNode):
element_type = 'food'
element_plural = 'foods'
name = String(nullable=False, unique=True)
color = String(nullable=False)
class Beverage(AnimalsNode):
element_type = 'beverage'
element_plural = 'beverages'
name = String(nullable=False, unique=True)
color = String(nullable=False)
class Eats(AnimalsRelationship):
label = 'eats'
modifier = String()
class Dislikes(AnimalsRelationship):
label = 'dislikes'
class Drinks(AnimalsRelationship):
label = 'drinks'
modifier = String()
class OGMAnimalsTestCaseBase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMAnimalsTestCaseBase, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('animals', 'root', 'root', initial_drop=True))
g.create_all(AnimalsNode.registry)
g.create_all(AnimalsRelationship.registry)
def testGraph(self):
assert len(AnimalsNode.registry) == 3
assert len(AnimalsRelationship.registry) == 3
g = self.g
rat = g.animals.create(name='rat', species='rodent')
mouse = g.animals.create(name='mouse', species='rodent')
queried_rat = g.query(Animal).filter(
Animal.name.endswith('at') | (Animal.name == 'tiger')).one()
assert rat == queried_rat
invalid_query_args = {'name': 'rat', 'name="rat" OR 1': 1}
try:
g.animals.query(**invalid_query_args).all()
except:
pass
else:
            assert False, 'Invalid params did not raise an exception!'
queried_mouse = g.query(mouse).one()
assert mouse == queried_mouse
assert mouse == g.get_vertex(mouse._id)
assert mouse == g.get_element(mouse._id)
try:
rat2 = g.animals.create(name='rat', species='rodent')
except:
pass
else:
            assert False, 'Uniqueness not enforced correctly'
pea = g.foods.create(name='pea', color='green')
queried_pea = g.foods.query(color='green', name='pea').one()
cheese = g.foods.create(name='cheese', color='yellow')
assert queried_pea == pea
rat_eats_pea = g.eats.create(queried_rat, queried_pea, modifier='lots')
mouse_eats_pea = g.eats.create(mouse, pea)
mouse_eats_cheese = Eats.objects.create(mouse, cheese)
assert rat_eats_pea.modifier == 'lots'
assert rat_eats_pea == g.get_edge(rat_eats_pea._id)
assert rat_eats_pea == g.get_element(rat_eats_pea._id)
water = g.beverages.create(name='water', color='clear')
mouse_drinks_water = g.drinks.create(mouse, water)
assert [water] == mouse.out(Drinks)
assert [mouse_drinks_water] == mouse.outE(Drinks)
assert [water] == mouse.both(Drinks)
assert [mouse_drinks_water] == mouse.bothE(Drinks)
nut = g.foods.create(name='nut', color='brown')
rat_dislikes_nut = g.dislikes.create(rat, nut)
mouse_eats_nut = g.eats.create(mouse, nut)
assert [rat] == nut.in_(Dislikes)
assert [rat_dislikes_nut] == nut.inE(Dislikes)
eaters = g.in_(Food, Eats)
assert rat in eaters
# Who eats the peas?
pea_eaters = g.foods.query(name='pea').what(expand(in_(Eats)))
for animal in pea_eaters:
print(animal.name, animal.species)
# Which animals eat each food
        # FIXME Calling all() here, as iteration over expand() results
        # is currently broken.
animal_foods = \
g.animals.query().what(expand(distinct(out(Eats)))).all()
for food in animal_foods:
print(food.name, food.color,
g.query(
g.foods.query(name=food.name).what(expand(in_(Eats)))) \
.what(Animal.name).all())
for food_name, food_color in g.query(Food.name, Food.color):
print(food_name, food_color) # 'pea green' # 'cheese yellow'
# FIXME While it is nicer to use files, parser should be more
# permissive with whitespace
g.scripts.add(GroovyScripts.from_string(
"""
def get_eaters_of(food_type) {
return g.V('@class', 'food').has('name', T.eq, food_type).inE().outV();
}
def get_foods_eaten_by(animal) {
return g.v(animal).outE('eats').inV()
}
def get_colored_eaten_foods(animal, color) {
return g.v(animal).outE('eats').inV().has('color', T.eq, color)
}
"""))
pea_eaters = g.gremlin('get_eaters_of', 'pea')
for animal in pea_eaters:
print(animal.name, animal.species) # 'rat rodent' # 'mouse rodent'
rat_cuisine = g.gremlin('get_foods_eaten_by', (rat,))
for food in rat_cuisine:
print(food.name, food.color) # 'pea green'
batch = g.batch()
batch['zombie'] = batch.animals.create(name='zombie',species='undead')
batch['brains'] = batch.foods.create(name='brains', color='grey')
# Retry up to twenty times
batch[:] = batch.eats.create(batch[:'zombie'], batch[:'brains']).retry(20)
batch['unicorn'] = batch.animals.create(name='unicorn', species='mythical')
batch['unknown'] = batch.foods.create(name='unknown', color='rainbow')
batch['mystery_diet'] = batch[:'unicorn'](Eats) > batch[:'unknown']
# Commits and clears batch
zombie = batch['$zombie']
assert zombie.species == 'undead'
class OGMAnimalsRegistryTestCase(OGMAnimalsTestCaseBase):
def testRegistry(self):
g = self.g
schema_registry = g.build_mapping(declarative_node(), declarative_relationship(), auto_plural=True)
assert all(c in schema_registry for c in ['animal', 'food', 'eats'])
assert type(schema_registry['animal'].species) == String
# Plurals not communicated to schema; postprocess registry before
# include() if you have a better solution than auto_plural.
assert schema_registry['food'].registry_plural != Food.registry_plural
g.clear_registry()
assert len(g.registry) == 0
g.include(schema_registry)
assert set(g.registry.keys()) == set(['food', 'dislikes', 'eats', 'beverage', 'animal', 'drinks'])
rat = g.animal.create(name='rat', species='rodent')
mouse = g.animal.create(name='mouse', species='rodent')
rat_class = g.registry['animal']
queried_rat = g.query(rat_class).filter(
rat_class.name.endswith('at') | (rat_class.name == 'tiger')).one()
assert rat == queried_rat
# try again, to make sure that brokers get cleared correctly
schema_registry = g.build_mapping(
declarative_node(), declarative_relationship(), auto_plural=True)
g.clear_registry()
g.include(schema_registry)
assert set(g.registry.keys()) == set(['food', 'dislikes', 'eats', 'beverage', 'animal', 'drinks'])
MoneyNode = declarative_node()
MoneyRelationship = declarative_relationship()
class Person(MoneyNode):
element_plural = 'people'
full_name = String(nullable=False)
uuid = String(nullable=False, default=UUID())
class Wallet(MoneyNode):
element_plural = 'wallets'
amount_precise = Decimal(name='amount', nullable=False)
amount_imprecise = Double()
class Carries(MoneyRelationship):
# No label set on relationship; Broker will not be attached to graph.
pass
class OGMMoneyTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMMoneyTestCase, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('money', 'root', 'root'
, initial_drop=True))
g.create_all(MoneyNode.registry)
g.create_all(MoneyRelationship.registry)
def testDoubleSerialization(self):
# Using str() on a float object in Python 2 sometimes
# returns scientific notation, which causes queries to be misapplied.
# Similarly, many alternative approaches of turning floats to strings
# in Python can cause loss of precision.
g = self.g
# Try very large values, very small values, and values with a lot of decimals.
target_values = [1e50, 1e-50, 1.23456789012]
for value in target_values:
amount_imprecise = value
amount_precise = decimal.Decimal(amount_imprecise)
original_wallet = g.wallets.create(amount_imprecise=amount_imprecise,
amount_precise=amount_precise)
wallet = g.query(Wallet).filter(
(Wallet.amount_imprecise > (value * (1 - 1e-6))) &
                (Wallet.amount_imprecise < (value * (1 + 1e-6)))
).one()
assert wallet.amount_imprecise == original_wallet.amount_imprecise
assert wallet.amount_precise == original_wallet.amount_precise
def testMoney(self):
assert len(MoneyNode.registry) == 2
assert len(MoneyRelationship.registry) == 1
g = self.g
if g.server_version.major == 1:
self.skipTest(
                'UUID method does not exist in OrientDB version < 2')
costanzo = g.people.create(full_name='Costanzo Veronesi', uuid=UUID())
valerius = g.people.create(full_name='Valerius Burgstaller'
, uuid=UUID())
if g.server_version >= (2,1,0):
# Default values supported
oliver = g.people.create(full_name='Oliver Girard')
else:
oliver = g.people.create(full_name='Oliver Girard', uuid=UUID())
        # If you override nullable properties to be not-mandatory, be aware
        # that OrientDB versions < 2.1.0 do not count null values
assert Person.objects.query().what(distinct(Person.uuid)).count() == 3
original_inheritance = decimal.Decimal('1520841.74309871919')
inheritance = g.wallets.create(
amount_precise = original_inheritance
, amount_imprecise = original_inheritance)
assert inheritance.amount_precise == original_inheritance
assert inheritance.amount_precise != inheritance.amount_imprecise
pittance = decimal.Decimal('0.1')
poor_pouch = g.wallets.create(
amount_precise=pittance
, amount_imprecise=pittance)
assert poor_pouch.amount_precise == pittance
assert poor_pouch.amount_precise != poor_pouch.amount_imprecise
# Django-style creation
costanzo_claim = Carries.objects.create(costanzo, inheritance)
valerius_claim = Carries.objects.create(valerius, inheritance)
oliver_carries = Carries.objects.create(oliver, poor_pouch)
g.scripts.add(GroovyScripts.from_file(
os.path.join(
os.path.split(
os.path.abspath(__file__))[0], 'money.groovy')), 'money')
rich_list = g.gremlin('rich_list', 1000000, namespace='money')
assert costanzo in rich_list and valerius in rich_list \
and oliver not in rich_list
bigwallet_query = g.query(Wallet).filter(Wallet.amount_precise > 100000)
smallerwallet_query = g.query(Wallet).filter(
Wallet.amount_precise < 100000)
# Basic query slicing
assert len(bigwallet_query[:]) == 1
assert len(smallerwallet_query) == 1
assert bigwallet_query.first() == inheritance
pouch = smallerwallet_query[0]
assert pouch == poor_pouch
assert len(pouch.outE()) == len(pouch.out())
assert pouch.in_() == pouch.both() and pouch.inE() == pouch.bothE()
first_inE = pouch.inE()[0]
assert first_inE == oliver_carries
assert first_inE.outV() == oliver and first_inE.inV() == poor_pouch
for i, wallet in enumerate(g.query(Wallet)):
print(decimal.Decimal(wallet.amount_imprecise) -
wallet.amount_precise)
assert i < 2
schema_registry = g.build_mapping(MoneyNode, MoneyRelationship)
assert all(c in schema_registry for c in ['person', 'wallet', 'carries'])
WalletType = schema_registry['wallet']
        # The original property name, amount_precise, is lost in translation;
        # the rebuilt class only knows 'amount'
assert type(WalletType.amount) == Decimal
assert type(WalletType.amount_imprecise) == Double
g.include(schema_registry)
debt = decimal.Decimal(-42.0)
WalletType.objects.create(amount=debt, amount_imprecise=0)
assert g.query(Wallet)[2].amount == -42
class OGMClassTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMClassTestCase, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('classes', 'root', 'root'
, initial_drop=True))
def testGraph(self):
g = self.g
try:
# The WRONG way to do multiple inheritance
# Here, Foo.registry and Bar.registry reference different classes,
            # and therefore g.create_all() cannot work.
class Foo(declarative_node()):
pass
class Bar(declarative_node()):
pass
class Fubar(Foo, Bar):
pass
except TypeError:
pass
else:
            assert False, 'Failed to enforce correct vertex base classes.'
DateTimeNode = declarative_node()
class OGMDateTimeTestCase(unittest.TestCase):
class DateTimeV(DateTimeNode):
element_type = 'datetime'
element_plural = 'datetime'
name = String(nullable=False, unique=True)
at = DateTime(nullable=False)
class DateV(DateTimeNode):
element_type = 'dt'
element_plural = 'dt'
name = String(nullable=False, unique=True)
at = Date(nullable=False)
def setUp(self):
g = self.g = Graph(Config.from_url('test_datetime', 'root', 'root',
initial_drop=True))
g.create_all(DateTimeNode.registry)
def testDateTime(self):
g = self.g
# orientdb does not store microseconds
# so make sure the generated datetime has none
at = datetime.now().replace(microsecond=0)
g.datetime.create(name='now', at=at)
returned_dt = g.datetime.query(name='now').one()
assert returned_dt.at == at
# FIXME This returns microseconds, so there's nothing wrong with
# OrientDB's storage. What's breaking for the above case?
server_now = g.datetime.create(name='server_now', at=sysdate())
assert server_now.at >= returned_dt.at
def testDate(self):
g = self.g
at = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).date()
g.dt.create(name='today', at=at)
returned_dt = g.dt.query(name='today').one()
assert returned_dt.at == at
UnicodeNode = declarative_node()
class UnicodeV(UnicodeNode):
element_type = 'unicode'
element_plural = 'unicode'
name = String(nullable=False, unique=True)
value = String(nullable=False)
alias = EmbeddedSet(linked_to=String(), nullable=True)
class OGMUnicodeTestCase(unittest.TestCase):
def setUp(self):
g = self.g = Graph(Config.from_url('test_unicode', 'root', 'root',
initial_drop=True))
g.create_all(UnicodeNode.registry)
def testUnicode(self):
g = self.g
data = [
(u'general_unicode', u'unicode value\u2017\u00c5'),
(u'special chars: single quote', u'\''),
(u'special chars: quote', u'"'),
(u'special chars: new line', u'\n'),
(u'special chars: tab', u'\t'),
(u'multiple special chars', u'\'"\n\t'),
]
for name, value in data:
g.unicode.create(name=name, value=value)
returned_v = g.unicode.query(name=name).one()
assert to_unicode(returned_v.value) == value
def testCommandEncoding(self):
g = self.g
name = u'unicode value\u2017'
aliases = [u'alias\u2017', u'alias\u00c5 2']
g.unicode.create(name=name, value=u'a', alias=aliases)
returned_v = g.unicode.query(name=name).one()
assert set(aliases) == set([to_unicode(a) for a in returned_v.alias])
class OGMTestCase(unittest.TestCase):
def testConfigs(self):
configs = [
'localhost:2424/test_config1',
'localhost/test_config2',
'plocal://localhost/test_config3',
'plocal://localhost:2424/test_config4',
'memory://localhost/test_config5',
'memory://localhost:2424/test_config6',
]
for conf in configs:
# the following line should not raise errors
Graph(Config.from_url(conf, 'root', 'root', initial_drop=True))
EmbeddedNode = declarative_node()
class OGMEmbeddedTestCase(unittest.TestCase):
class EmbeddedSetV(EmbeddedNode):
element_type = 'emb_set'
element_plural = 'emb_set'
name = String(nullable=False, unique=True)
alias = EmbeddedSet(nullable=False)
class EmbeddedMapV(EmbeddedNode):
element_type = 'emb_map'
element_plural = 'emb_map'
name = String(nullable=False, unique=True)
children = EmbeddedMap()
def setUp(self):
g = self.g = Graph(Config.from_url('test_embedded', 'root', 'root',
initial_drop=True))
g.create_all(EmbeddedNode.registry)
def testEmbeddedSetCreate(self):
g = self.g
# OrientDB currently has a bug that allows identical entries in EmbeddedSet:
# https://github.com/orientechnologies/orientdb/issues/3601
# This is not planned to be fixed until v3.0, so tolerate data
# returned as a list, and turn it to a set before the check for convenience
name = 'embed'
alias = ['implant', 'lodge', 'place']
g.emb_set.create(name=name, alias=alias)
result = g.emb_set.query(name=name).one()
self.assertSetEqual(set(alias), set(result.alias))
# now try the same operation, but pass a set instead of a list
name2 = 'embed2'
alias2 = set(alias)
g.emb_set.create(name=name2, alias=alias2)
result = g.emb_set.query(name=name2).one()
self.assertSetEqual(alias2, set(result.alias))
def testEmbeddedMapCreate(self):
g = self.g
name = 'embed_map'
children = {u'abc': u'def', 'x': 1}
g.emb_map.create(name=name, children=children)
result = g.emb_map.query(name=name).one()
# if dicts A and B are subsets of each other, then they are the same dict (by value)
self.assertDictContainsSubset(result.children, children)
self.assertDictContainsSubset(children, result.children)
def testEmbeddedSetContains(self):
g = self.g
name = 'embed'
alias = ['implant', 'lodge', 'place']
element_cls = g.registry['emb_set']
g.emb_set.create(name=name, alias=alias)
canonical_result = g.emb_set.query(name=name).one()
self.assertIsNotNone(canonical_result)
for alternate in alias:
received = g.query(element_cls).filter(element_cls.alias.contains(alternate)).one()
self.assertEqual(canonical_result, received)
class OGMEmbeddedDefaultsTestCase(unittest.TestCase):
def setUp(self):
g = self.g = Graph(Config.from_url('test_embedded_defaults', 'root', 'root',
initial_drop=True))
def testDefaultData(self):
g = self.g
g.client.command('CREATE CLASS DefaultEmbeddedNode EXTENDS V')
g.client.command('CREATE CLASS DefaultData')
g.client.command('CREATE PROPERTY DefaultData.normal Boolean')
g.client.command('CREATE PROPERTY DefaultEmbeddedNode.name String')
g.client.command('CREATE PROPERTY DefaultEmbeddedNode.info EmbeddedList DefaultData')
try:
g.client.command('ALTER PROPERTY DefaultData.normal DEFAULT 0')
except PyOrientSQLParsingException as e:
if "Unknown property attribute 'DEFAULT'" in e.errors[0]:
# The current OrientDB version (<2.1) doesn't allow default values.
# Simply skip this test, there's nothing we can test here.
return
else:
raise
base_node = declarative_node()
base_relationship = declarative_relationship()
g.include(g.build_mapping(base_node, base_relationship, auto_plural=True))
node = g.DefaultEmbeddedNode.create(name='default_embedded')
node.info = [{}]
try:
node.save()
except PyOrientCommandException as e:
if 'incompatible type is used.' in e.errors[0]:
# The current OrientDB version (<2.1.5) doesn't allow embedded classes,
# only embedded primitives (e.g. String or Int).
# Simply skip this test, there's nothing we can test here.
return
else:
raise
# On the next load, the node should have:
# 'info' = [{'normal': False}]
node = g.DefaultEmbeddedNode.query().one()
self.assertIn('normal', node.info[0])
self.assertIs(node.info[0]['normal'], False)
if sys.version_info[0] < 3:
def to_unicode(x):
return str(x).decode('utf-8')
else:
def to_unicode(x):
return str(x)
class OGMToposortTestCase(unittest.TestCase):
@staticmethod
def before(classes, bf, aft):
"""Test if bf is before aft in the classes list
Does not check if both exist
"""
for c in classes:
if c['name'] == bf:
return True
if c['name'] == aft:
return False
return False
def testToposort(self):
toposorted = Graph.toposort_classes([
{ 'name': 'A', 'superClasses': None},
{ 'name': 'B', 'superClasses': None},
{ 'name': 'C', 'superClasses': ['B']},
{ 'name': 'D', 'superClasses': ['E', 'F']},
{ 'name': 'E', 'superClasses': None},
{ 'name': 'F', 'superClasses': ['B']},
{ 'name': 'G', 'superClasses': None, 'properties': [{'linkedClass': 'H'}]},
{ 'name': 'H', 'superClasses': None}
])
assert set([c['name'] for c in toposorted]) == set(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
assert OGMToposortTestCase.before(toposorted, 'B', 'C')
assert OGMToposortTestCase.before(toposorted, 'E', 'D')
assert OGMToposortTestCase.before(toposorted, 'F', 'D')
assert OGMToposortTestCase.before(toposorted, 'B', 'F')
assert OGMToposortTestCase.before(toposorted, 'B', 'D')
assert OGMToposortTestCase.before(toposorted, 'H', 'G')
def testInfiniteLoop(self):
# Make sure that this at least stops in case of an infinite dependency loop
with self.assertRaises(AssertionError):
toposorted = Graph.toposort_classes([
{ 'name': 'A', 'superClasses': ['B']},
{ 'name': 'B', 'superClasses': ['A']}
])
HardwareNode = declarative_node()
HardwareRelationship = declarative_relationship()
class CPU(HardwareNode):
element_plural = 'cpu'
name = String(nullable=False)
class X86CPU(CPU):
element_plural = 'x86cpu'
version = Integer(nullable=True)
class Manufacturer(HardwareNode):
element_plural = 'manufacturer'
name = String(nullable=False)
class Manufactures(HardwareRelationship):
label = 'manufactures'
out_ = Link(linked_to=Manufacturer)
in_ = Link(linked_to=CPU)
# Added this to catch a nasty bug where toposort_classes overrode superClasses
# when reading schema from the database
class Outperforms(HardwareRelationship):
label = 'outperforms'
out_ = Link(linked_to=CPU)
in_ = Link(linked_to=CPU)
class OGMTypedEdgeTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMTypedEdgeTestCase, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('hardware', 'root', 'root'
, initial_drop=True))
g.create_all(HardwareNode.registry)
g.create_all(HardwareRelationship.registry)
def testConstraints(self):
g = self.g
pentium = g.cpu.create(name='Pentium')
intel = g.manufacturer.create(name='Intel')
# Now the constraints are enforced
with self.assertRaises(PyOrientCommandException):
g.manufactures.create(pentium, pentium)
g.manufactures.create(intel, pentium)
loaded_pentium = g.manufacturer.query().what(expand(distinct(out(Manufactures)))).all()
assert loaded_pentium == [pentium]
def testRegistryLoading(self):
g = self.g
database_registry = g.build_mapping(
declarative_node(), declarative_relationship(), auto_plural=True)
g.clear_registry()
g.include(database_registry)
manufactures_cls = g.registry['manufactures']
assert type(manufactures_cls.in_) == Link
assert manufactures_cls.in_.linked_to == g.registry['cpu']
class OGMTestInheritance(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMTestInheritance, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('hardware', 'root', 'root'
, initial_drop=True))
g.create_all(HardwareNode.registry)
g.create_all(HardwareRelationship.registry)
def testInheritance(self):
g = self.g
pentium = g.x86cpu.create(name='Pentium', version=6)
self.assertTrue(isinstance(pentium.name, str))
        self.assertEqual('Pentium', pentium.name)
        self.assertEqual(6, pentium.version)
loaded_pentium = g.get_vertex(pentium._id)
        self.assertEqual(pentium, loaded_pentium)
self.assertTrue(isinstance(loaded_pentium.name, str))
def testStrictness(self):
g = self.g
# Unknown properties get silently dropped by default
pentium = g.cpu.create(name='Pentium', version=6)
loaded_pentium = g.get_vertex(pentium._id)
# Version is not defined in cpu
assert not hasattr(pentium, 'version')
# But in strict mode they generate errors
g = self.g = Graph(Config.from_url('hardware', 'root', 'root'
, initial_drop=False), strict=True)
g.include(g.build_mapping(
declarative_node(), declarative_relationship(), auto_plural=True))
with self.assertRaises(AttributeError):
pentium = g.cpu.create(name='Pentium', version=6)
pentium = g.x86cpu.create(name='Pentium', version=6)
        self.assertEqual('Pentium', pentium.name)
        self.assertEqual(6, pentium.version)
class OGMTestNullProperties(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMTestNullProperties, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('hardware', 'root', 'root'
, initial_drop=True))
g.create_all(HardwareNode.registry)
g.create_all(HardwareRelationship.registry)
def testInheritance(self):
g = self.g
pentium = g.x86cpu.create(name='Pentium')
loaded_pentium = g.get_vertex(pentium._id)
self.assertIsNone(loaded_pentium.version)
ClassFieldNode = declarative_node()
ClassFieldRelationship = declarative_relationship()
class ClassFieldVertex(ClassFieldNode):
name = String(nullable=False)
class ClassFieldVertex2(ClassFieldNode):
name = String(nullable=False)
class ClassFieldEdge(ClassFieldRelationship):
out_ = Link(linked_to=ClassFieldVertex)
in_ = Link(linked_to=ClassFieldVertex)
class OGMTestClassField(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMTestClassField, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('custom_field', 'root', 'root'
, initial_drop=True))
g.create_all(ClassFieldNode.registry)
g.create_all(ClassFieldRelationship.registry)
g.client.command('ALTER CLASS classfieldvertex CUSTOM test_field_1=test_string_one')
g.client.command('ALTER CLASS classfieldvertex CUSTOM test_field_2="test string two"')
g.client.command('ALTER CLASS classfieldedge CUSTOM test_field_1="test string two"')
def testCustomFields(self):
g = self.g
database_registry = g.build_mapping(
declarative_node(), declarative_relationship(), auto_plural=True)
g.clear_registry()
g.include(database_registry)
if g.server_version > (2,2,0): # Ugly! TODO Isolate version at which behaviour was changed
            self.assertEqual(
                {'test_field_1': 'test_string_one', 'test_field_2': 'test string two'},
                g.registry['classfieldvertex'].class_fields)
            self.assertEqual(
                {'test_field_1': 'test string two'},
                g.registry['classfieldedge'].class_fields)
        else:
            self.assertEqual(
                {'test_field_1': 'test_string_one', 'test_field_2': '"test string two"'},
                g.registry['classfieldvertex'].class_fields)
            self.assertEqual(
                {'test_field_1': '"test string two"'},
                g.registry['classfieldedge'].class_fields)
        self.assertEqual({}, g.registry['classfieldvertex2'].class_fields)
class OGMTestAbstractField(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(OGMTestAbstractField, self).__init__(*args, **kwargs)
self.g = None
def setUp(self):
g = self.g = Graph(Config.from_url('abstract_classes', 'root', 'root'
, initial_drop=True))
g.client.command('CREATE CLASS AbstractClass EXTENDS V ABSTRACT')
g.client.command('CREATE CLASS ConcreteClass EXTENDS V')
def testAbstractFlag(self):
g = self.g
database_registry = g.build_mapping(
declarative_node(), declarative_relationship(), auto_plural=True)
self.assertTrue(database_registry['AbstractClass'].abstract)
self.assertFalse(database_registry['ConcreteClass'].abstract)
|
|
from SPARQLWrapper import SPARQLWrapper, JSON
# from __init__ import QUERY_LIMIT
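# QUERY_LIMIT is appended verbatim to the generated queries; set it to e.g.
# "LIMIT 100" (hypothetical value) to cap result sizes while testing.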
QUERY_LIMIT = ""
import pandas as pd
import numpy as np
def run_query_with_datatype(query=None, endpoint=None, datatype=None):
"""
:param query: raw SPARQL query
:param endpoint: endpoint source that hosts the data
:param datatype: e.g. "http://dbpedia.org/datatype/centimetre" if None, it will return all datatypes
:return: query results with the matching datatype, query results not matching the given datatype
"""
results = run_query(query=query, endpoint=endpoint)
if len(results) > 0:
if len(results[0].keys()) == 1:
k = results[0].keys()[0]
if datatype is None:
correct_type_results = [r[k]["value"] for r in results]
wrong_type_results = []
else:
                correct_type_results = [r[k]["value"] for r in results if r[k].get("datatype") == datatype]
                wrong_type_results = [r[k]["value"] for r in results if r[k].get("datatype") != datatype]
return correct_type_results, wrong_type_results
else:
print "a query that results in multiple columns is not allowed"
# Because, if we allow having multiple columns, and the number of values in the first column
# that matches the given datatype might not be the same as the one in the second column
# which would results in unbalanced results
return [], []
def run_query(query=None, endpoint=None, raiseexception=False):
"""
:param query: raw SPARQL query
:param endpoint: endpoint source that hosts the data
:return: query result as a dict
"""
if endpoint is None:
print "endpoints cannot be None"
return []
sparql = SPARQLWrapper(endpoint=endpoint)
sparql.setQuery(query=query)
sparql.setMethod("POST")
sparql.setReturnFormat(JSON)
#sparql.setTimeout(300)
try:
results = sparql.query().convert()
if len(results["results"]["bindings"]) > 0:
return results["results"]["bindings"]
else:
print "returns 0 rows"
print "endpoint: "+endpoint
print "query: <%s>" % str(query).strip()
return []
except Exception as e:
print "sparql error: $$<%s>$$" % str(e)
print "query: $$<%s>$$" % str(query)
if raiseexception:
raise e
return []
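# A sketch of the shape run_query returns (standard SPARQL 1.1 JSON results;
# the values below are hypothetical): each binding maps a variable name to a
# dict with at least 'type' and 'value' keys, e.g.
#   [{'s': {'type': 'uri', 'value': 'http://example.org/thing'}}]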
def get_properties(endpoint=None, class_uri=None, min_count=20):
"""
:param endpoint: the meta endpoint
:param class_uri: with or without < and >
:param min_count:
:return: returns the properties and can be accessed as follows: properties[idx]['property']['value']
"""
class_uri_stripped = get_url_stripped(class_uri)
query = """
prefix loupe: <http://ont-loupe.linkeddata.es/def/core/>
prefix xsd: <http://www.w3.org/2001/XMLSchema#>
select distinct ?p as ?property ?count where {
graph <http://data.loupe.linked.es/dbpedia/1> {
?pp loupe:aboutClass <%s>;
loupe:aboutProperty ?p;
loupe:hasDatatypePartition ?pdp;
loupe:objectCount ?count .
{
?pdp loupe:datatype xsd:double .
} UNION {
?pdp loupe:datatype xsd:integer .
} UNION {
?pdp loupe:datatype xsd:decimal .
}
FILTER(?count > %d)
}
}
ORDER BY desc(?count)
%s
""" % (class_uri_stripped, min_count, QUERY_LIMIT)
properties = run_query(query=query, endpoint=endpoint)
return properties
def get_properties_as_list(endpoint=None, class_uri=None, min_count=20):
properties = get_properties(endpoint=endpoint, class_uri=class_uri, min_count=min_count)
clean_properties = [p['property'] for p in properties]
return pd.DataFrame(clean_properties)['value']
def get_objects(endpoint=None, class_uri=None, property_uri=None, isnumericfilter=True, failbacknofilter=True):
    class_uri_stripped = get_url_stripped(class_uri)
    property_uri_stripped = get_url_stripped(property_uri)
if isnumericfilter:
try:
query = """
select ?o where{ ?s a <%s>. ?s <%s> ?o FILTER(isNumeric(?o))} %s
""" % (class_uri_stripped, property_uri_stripped, QUERY_LIMIT)
objects = run_query(query=query, endpoint=endpoint, raiseexception=True)
except Exception as e:
if failbacknofilter:
print "fail back ... "
query = """
select ?o where{ ?s a <%s>. ?s <%s> ?o} %s
""" % (class_uri_stripped, property_uri_stripped, QUERY_LIMIT)
objects = run_query(query=query, endpoint=endpoint)
else:
query = """
select ?o where{ ?s a <%s>. ?s <%s> ?o} %s
""" % (class_uri_stripped, property_uri_stripped, QUERY_LIMIT)
objects = run_query(query=query, endpoint=endpoint)
return objects
def get_objects_as_list(endpoint=None, class_uri=None, property_uri=None, isnumericfilter=True):
objects = get_objects(endpoint=endpoint, class_uri=class_uri, property_uri=property_uri,
isnumericfilter=isnumericfilter)
clean_objects = [o['o'] for o in objects]
if len(clean_objects) == 0:
print "no objects found for class %s property %s in endpoint %s" % (class_uri, property_uri, endpoint)
col_mat = pd.DataFrame([]).as_matrix()
col_mat.shape = (0, 0)
return col_mat
    # to get rid of the strings that cannot be transformed into numbers
col_mat = pd.DataFrame(clean_objects)['value'].apply(pd.to_numeric, errors='coerce').dropna(how='any').as_matrix()
col_mat.shape = (col_mat.shape[0], 1)
col_mat = col_mat.astype(np.float)
    # remove NaN values, if any; source: http://stackoverflow.com/questions/11620914/removing-nan-values-from-an-array
col_mat_num = col_mat[~np.isnan(col_mat)]
col_mat_num.shape = (col_mat_num.shape[0], 1)
    if (col_mat.shape[0] - col_mat_num.shape[0]) < col_mat_num.shape[0]:  # check how clean the data is
return col_mat_num
else:
a = np.array([])
a.shape = (0, 1)
return a
def get_classes(endpoint=None):
if endpoint is None:
raise Exception("get_classes> endpoint should not be None")
query = """
select distinct ?Concept where { [] a ?Concept}
"""
results = run_query(endpoint=endpoint, query=query, raiseexception=True)
classes = [r['Concept']['value'] for r in results]
return classes
################################################################
# Property Extraction A-BOX #
################################################################
def split_upper_lower_bound(upper_bound=None, lower_bound=None, class_uri=None, endpoint=None, raiseexception=None,
isnumericfilter=None):
if upper_bound is None:
raise Exception("split_upper_lower_bound> upper_bound should not be None")
if lower_bound is None:
raise Exception("split_upper_lower_bound> lower_bound should not be None")
if class_uri is None:
raise Exception("split_upper_lower_bound> class_uri should not be None")
if endpoint is None:
raise Exception("split_upper_lower_bound> endpoint should not be None")
if raiseexception is None:
raise Exception("split_upper_lower_bound> raiseexception should not be None")
if isnumericfilter is None:
raise Exception("split_upper_lower_bound> isnumericfilter should not be None")
print "----------- split_upper_lower_bound -----------"
print "upper_bound: %d" % upper_bound
print "lower_bound: %d" % lower_bound
print "\n"
if upper_bound - lower_bound > 2:
split_point = int((upper_bound - lower_bound) / 2) + lower_bound
upper_results = get_numerical_properties_for_class_abox_using_half_split(endpoint=endpoint,
class_uri=class_uri,
raiseexception=raiseexception,
lower_bound=lower_bound,
upper_bound=split_point,
first_time=False,
isnumericfilter=isnumericfilter)
lower_results = get_numerical_properties_for_class_abox_using_half_split(endpoint=endpoint,
class_uri=class_uri,
raiseexception=raiseexception,
lower_bound=split_point,
upper_bound=upper_bound,
first_time=False,
isnumericfilter=isnumericfilter)
return upper_results + lower_results
else:
raise Exception("The endpoint is so slow or the timeout period is very short to query")
def get_numerical_properties_for_class_abox_using_half_split(endpoint=None, class_uri=None,
upper_bound=None, lower_bound=1,
raiseexception=False, first_time=None, max_iter=15,
isnumericfilter=True):
"""
:param endpoint:
:param class_uri:
:param raiseexception:
:param lower_bound:
:param upper_bound
:return:
"""
if class_uri is None:
print "get_numerical_properties_for_class_abox_using_half_split> class_uri should not be None"
return []
if upper_bound is None:
print "get_numerical_properties_for_class_abox_using_half_split> upper_bound should not be None"
return []
if first_time is None:
print "get_numerical_properties_for_class_abox_using_half_split> first_time should not be None"
return []
# just to see what is going on
print "========== get_numerical_properties_for_class_abox_using_half_split ========="
print "first time: %s" % str(first_time)
print "upper_bound: %d" % upper_bound
print "lower_bound: %d" % lower_bound
print "\n"
class_uri_stripped = get_url_stripped(class_uri)
if first_time:
if isnumericfilter:
query = """
SELECT ?p ?num
WHERE{
FILTER (?num > %d)
{
SELECT ?p (count(distinct ?s) as ?num)
WHERE {
?s a <%s>.
?s ?p []
}
group by ?p
}
{
SELECT distinct (?p)
WHERE{
?s ?p ?o
FILTER( isNumeric(?o))
}
}
}
order by desc(?num)
""" % (upper_bound, class_uri_stripped)
else:
query = """
SELECT ?p ?num
WHERE{
FILTER (?num > %d)
{
SELECT ?p (count(distinct ?s) as ?num)
WHERE {
?s a <%s>.
?s ?p []
}
group by ?p
}
}
order by desc(?num)
""" % (upper_bound, class_uri_stripped)
else:
if isnumericfilter:
query = """
SELECT ?p ?num
WHERE{
FILTER (?num >= %d && ?num <= %d)
{
SELECT ?p (count(distinct ?s) as ?num)
WHERE {
?s a <%s>.
?s ?p []
}
group by ?p
}
{
SELECT distinct (?p)
WHERE{
?s ?p ?o
FILTER( isNumeric(?o))
}
}
}
order by desc(?num)
""" % (lower_bound, upper_bound, class_uri_stripped)
else:
query = """
SELECT ?p ?num
WHERE{
FILTER (?num >= %d && ?num <= %d)
{
SELECT ?p (count(distinct ?s) as ?num)
WHERE {
?s a <%s>.
?s ?p []
}
group by ?p
}
}
order by desc(?num)
""" % (lower_bound, upper_bound, class_uri_stripped)
try:
print "will run the query"
results = run_query(query=query, endpoint=endpoint, raiseexception=True)
print "query returned"
properties = [r['p']['value'] for r in results]
print "fetching"
if not first_time:
print "returning properties"
print "properties"
print properties
return properties
else: # first time
print "returning firsttime"
return properties + split_upper_lower_bound(upper_bound=upper_bound, lower_bound=lower_bound,
class_uri=class_uri_stripped, endpoint=endpoint,
raiseexception=raiseexception, isnumericfilter=isnumericfilter)
except Exception as e:
if "timed out" in str(e):
if not first_time:
return split_upper_lower_bound(upper_bound=upper_bound, lower_bound=lower_bound,
class_uri=class_uri_stripped,
endpoint=endpoint, raiseexception=raiseexception,
isnumericfilter=isnumericfilter)
else: # first time
if max_iter == 0:
if raiseexception:
raise Exception("reached iteration limit and the timeout still occurs")
else:
print "reached iteration limit and the timeout still occurs"
return []
return get_numerical_properties_for_class_abox_using_half_split(endpoint=endpoint, class_uri=class_uri_stripped
, upper_bound=upper_bound*2,
lower_bound=lower_bound,
raiseexception=raiseexception,
first_time=True,
max_iter=max_iter-1,
isnumericfilter=isnumericfilter)
# split_upper_lower_bound(upper_bound=upper_bound, lower_bound=lower_bound, class_uri=class_uri_stripped,
# endpoint=endpoint, raiseexception=raiseexception)
elif "'isNumeric'" in str(e) and first_time:
print "get_numerical_properties_for_class_abox_using_half_split> isNumeric is not supported, so we gonna ignore it"
return get_numerical_properties_for_class_abox_using_half_split(endpoint=endpoint, class_uri=class_uri,
upper_bound=upper_bound,
lower_bound=lower_bound,
raiseexception=raiseexception,
first_time=first_time, max_iter=max_iter,
isnumericfilter=False)
else:
if raiseexception:
print "captured %s" % str(e)
raise e
else:
print "get_numerical_properties_for_class_abox_using_half_split> an exception occurred: %s" % str(e)
return []
def get_numerical_properties_for_class_abox(endpoint=None, class_uri=None, raiseexception=False):
"""
    a naive approach to get all numerical properties for a given class using the data itself (A-BOX)
    :param endpoint: endpoint source that hosts the data
    :param class_uri: class uri for the class
    :return: a list of property URIs
"""
if class_uri is None:
print "get_numerical_properties_for_class_abox> class_uri should not be None"
return []
class_uri_stripped = get_url_stripped(class_uri)
query = """
select ?p count(distinct ?s) as ?num where {
?s a <%s>.
?s ?p ?o
FILTER(isNumeric(?o))
}
group by ?p
order by desc(?num)
""" % class_uri_stripped
results = run_query(query=query, endpoint=endpoint, raiseexception=raiseexception)
properties = [r['p']['value'] for r in results]
return properties
def get_properties_for_class_abox(endpoint=None, class_uri=None, raiseexception=False):
"""
    a naive approach to get all properties for a given class using the data itself (A-BOX)
    :param endpoint: endpoint source that hosts the data
    :param class_uri: class uri for the class
    :return: a list of property URIs
"""
if class_uri is None:
print "get_numerical_properties_for_class_abox> class_uri should not be None"
return []
class_uri_stripped = get_url_stripped(class_uri)
query = """
SELECT ?p (count(distinct ?s) as ?num)
WHERE {
?s a <%s>.
?s ?p []
}
group by ?p
order by desc(?num)
""" % class_uri_stripped
results = run_query(query=query, endpoint=endpoint, raiseexception=raiseexception)
properties = [r['p']['value'] for r in results]
return properties
################################################################
# Property Extraction T-BOX #
################################################################
def get_numerical_properties_for_class_tbox(endpoint=None, class_uri=None):
"""
get all numerical properties for a given class using the domain and range (T-BOX)
:param endpoint: endpoint
:param class_uri: class uri for the class
:return: properties
"""
if class_uri is None:
print "get_numerical_properties_for_class_tbox> class_uri should not be None"
return []
class_uri_stripped = get_url_stripped(class_uri)
query = """
select distinct ?pt where{
?pt rdfs:domain <%s>.
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#float>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#double>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#decimal>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#integer>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#nonPositiveInteger>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#negativeInteger>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#long>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#int>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#short>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#byte>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#nonNegativeInteger>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedLong>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedInt>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedShort>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedByte>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#positiveInteger>}
}
""" % class_uri_stripped
results = run_query(query=query, endpoint=endpoint)
properties = [r['pt']['value'] for r in results]
return properties
def get_all_classes_properties_numerical(endpoint=None):
"""
search for all class/property combinations with numerical objects. here we are relying on the defined
structure using rdfs:range and rdfs:domain (TBOX) and not on the data level
:param endpoint:
:return: a list of class/property combinations. in case of no results or error, it will return []
"""
if endpoint is None:
print "get_all_classes_properties_numerical> endpoint should not be None"
return []
query = """
select distinct ?pt ?c where{
?pt rdfs:domain ?c.
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#float>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#double>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#decimal>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#integer>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#nonPositiveInteger>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#negativeInteger>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#long>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#int>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#short>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#byte>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#nonNegativeInteger>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedLong>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedInt>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedShort>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#unsignedByte>} UNION
{?pt rdfs:range <http://www.w3.org/2001/XMLSchema#positiveInteger>}
}
"""
results = run_query(query=query, endpoint=endpoint)
class_property_uris = [(r['c']['value'], r['pt']['value']) for r in results]
print "get_all_classes_properties_numerical> class_property_uris:"
print class_property_uris
return class_property_uris
#####################################################################
# Online Text Annotation #
#####################################################################
def get_entities(subject_name, endpoint):
"""
    assumes literals only in the form "name"@en; to be extended to other languages and other literal types, e.g. name^^someurltype
:param subject_name:
:return:
"""
query = """
select distinct ?s where{
?s ?p "%s"@en
}
""" % (subject_name)
results = run_query(query=query, endpoint=endpoint)
entities = [r['s']['value'] for r in results]
return entities
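# NOTE: the following definition shadows get_classes(endpoint=None) defined
# above; only this three-argument version remains visible to importers.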
def get_classes(entity, endpoint, hierarchy):
"""
:param entity:
:param endpoint:
:param hierarchy:
:return:
"""
if hierarchy:
query = """
select distinct ?c where{
<%s> a ?cc.
?cc rdfs:subClassOf* ?c.
}
""" % entity
else:
query = """
select distinct ?c where{
<%s> a ?c
}
""" % entity
results = run_query(query=query, endpoint=endpoint)
classes = [r['c']['value'] for r in results]
return classes
def get_classes_not_in(classes, endpoint):
"""
This is to get more specific classes (e.g. given "thing", "soccer player", "person" it returns "soccer player")
:param classes:
:param endpoint:
:return:
"""
my_classes = ",".join(["<"+c+">" for c in classes])
query = """
select ?ech where{
?ech a [].
FILTER(?ech IN (%s)).
MINUS{
?ec a [].
FILTER (?ec NOT IN (%s)).
?ec rdfs:subClassOf+ ?ech.
}
}
""" % (my_classes, my_classes)
results = run_query(query=query, endpoint=endpoint)
classes = [r['ech']['value'] for r in results]
return classes
def get_classes_with_parents(classes, endpoint):
"""
    This is to filter out classes that do not have parents
:param classes:
:param endpoint:
:return:
"""
my_classes = ",".join(["<"+c+">" for c in classes])
query = """
select ?c where{
?c rdfs:subClassOf ?parent
FILTER(?c IN (%s)).
}
""" % (my_classes)
results = run_query(query=query, endpoint=endpoint)
classes = [r['c']['value'] for r in results]
return classes
def get_parents_of_class(class_name, endpoint):
"""
    get the parent classes of the given class; callers take the first result in case of multiple ones
:param class_name:
:param endpoint:
:return:
"""
query = """
select distinct ?c where{
<%s> rdfs:subClassOf ?c.
}
""" % class_name
results = run_query(query=query, endpoint=endpoint)
classes = [r['c']['value'] for r in results]
return classes
# iteration 8
def get_classes_subjects_count(classes, endpoint):
print "in get_classes_subjects_count"
d = {}
for c in classes:
num = get_num_class_subjects(c, endpoint)
d[c] = int(num)
return d
def get_num_class_subjects(class_uri, endpoint):
print "count subject for class %s" % class_uri
query = """
select count(?s) as ?num
where {
?s a ?c.
?c rdfs:subClassOf* <%s>.
}
""" % class_uri
results = run_query(query=query, endpoint=endpoint)
return results[0]['num']['value']
#####################################################################
# Helper Functions #
#####################################################################
def get_url_stripped(uri):
"""
:param uri: <myuri> or uri
:return: myuri
"""
uri_stripped = uri.strip()
if uri_stripped[0] == "<":
uri_stripped = uri_stripped[1:]
if uri_stripped[-1] == ">":
uri_stripped = uri_stripped[:-1]
return uri_stripped
|
|
from typing import List
from typing import Optional
from sqlalchemy import Boolean
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import declared_attr
from sqlalchemy.orm import registry as declarative_registry
from sqlalchemy.orm import registry
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
try:
import dataclasses
except ImportError:
pass
class DataclassesTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
@classmethod
def define_tables(cls, metadata):
Table(
"accounts",
metadata,
Column("account_id", Integer, primary_key=True),
Column("widget_count", Integer, nullable=False),
)
Table(
"widgets",
metadata,
Column("widget_id", Integer, primary_key=True),
Column(
"account_id",
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
),
Column("type", String(30), nullable=False),
Column("name", String(30), nullable=False),
Column("magic", Boolean),
)
@classmethod
def setup_classes(cls):
@dataclasses.dataclass
class Widget:
name: Optional[str] = None
@dataclasses.dataclass
class SpecialWidget(Widget):
magic: bool = False
@dataclasses.dataclass
class Account:
account_id: int
widgets: List[Widget] = dataclasses.field(default_factory=list)
widget_count: int = dataclasses.field(init=False)
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
cls.classes.Account = Account
cls.classes.Widget = Widget
cls.classes.SpecialWidget = SpecialWidget
@classmethod
def setup_mappers(cls):
accounts = cls.tables.accounts
widgets = cls.tables.widgets
Account = cls.classes.Account
Widget = cls.classes.Widget
SpecialWidget = cls.classes.SpecialWidget
cls.mapper_registry.map_imperatively(
Widget,
widgets,
polymorphic_on=widgets.c.type,
polymorphic_identity="normal",
)
cls.mapper_registry.map_imperatively(
SpecialWidget,
widgets,
inherits=Widget,
polymorphic_identity="special",
)
cls.mapper_registry.map_imperatively(
Account, accounts, properties={"widgets": relationship(Widget)}
)
def check_account_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
account_id, widgets, widget_count = dataclasses.fields(obj)
eq_(account_id.name, "account_id")
eq_(widget_count.name, "widget_count")
eq_(widgets.name, "widgets")
def check_widget_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
(name,) = dataclasses.fields(obj)
eq_(name.name, "name")
def check_special_widget_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
name, magic = dataclasses.fields(obj)
eq_(name.name, "name")
eq_(magic.name, "magic")
def data_fixture(self):
Account = self.classes.Account
Widget = self.classes.Widget
SpecialWidget = self.classes.SpecialWidget
return Account(
account_id=42,
widgets=[Widget("Foo"), SpecialWidget("Bar", magic=True)],
)
def check_data_fixture(self, account):
Widget = self.classes.Widget
SpecialWidget = self.classes.SpecialWidget
self.check_account_dataclass(account)
eq_(account.account_id, 42)
eq_(account.widget_count, 2)
eq_(len(account.widgets), 2)
foo, bar = account.widgets
self.check_widget_dataclass(foo)
assert isinstance(foo, Widget)
eq_(foo.name, "Foo")
self.check_special_widget_dataclass(bar)
assert isinstance(bar, SpecialWidget)
eq_(bar.name, "Bar")
eq_(bar.magic, True)
def test_classes_are_still_dataclasses(self):
self.check_account_dataclass(self.classes.Account)
self.check_widget_dataclass(self.classes.Widget)
self.check_special_widget_dataclass(self.classes.SpecialWidget)
def test_construction(self):
SpecialWidget = self.classes.SpecialWidget
account = self.data_fixture()
self.check_data_fixture(account)
widget = SpecialWidget()
eq_(widget.name, None)
eq_(widget.magic, False)
def test_equality(self):
Widget = self.classes.Widget
SpecialWidget = self.classes.SpecialWidget
eq_(Widget("Foo"), Widget("Foo"))
assert Widget("Foo") != Widget("Bar")
assert Widget("Foo") != SpecialWidget("Foo")
def test_asdict_and_astuple_widget(self):
Widget = self.classes.Widget
widget = Widget("Foo")
eq_(dataclasses.asdict(widget), {"name": "Foo"})
eq_(dataclasses.astuple(widget), ("Foo",))
def test_asdict_and_astuple_special_widget(self):
SpecialWidget = self.classes.SpecialWidget
widget = SpecialWidget("Bar", magic=True)
eq_(dataclasses.asdict(widget), {"name": "Bar", "magic": True})
eq_(dataclasses.astuple(widget), ("Bar", True))
def test_round_trip(self):
Account = self.classes.Account
account = self.data_fixture()
with fixture_session() as session:
session.add(account)
session.commit()
with fixture_session() as session:
a = session.get(Account, 42)
self.check_data_fixture(a)
def test_appending_to_relationship(self):
Account = self.classes.Account
Widget = self.classes.Widget
account = self.data_fixture()
with Session(testing.db) as session, session.begin():
session.add(account)
account.add_widget(Widget("Xyzzy"))
with Session(testing.db) as session:
a = session.get(Account, 42)
eq_(a.widget_count, 3)
eq_(len(a.widgets), 3)
def test_filtering_on_relationship(self):
Account = self.classes.Account
Widget = self.classes.Widget
account = self.data_fixture()
with Session(testing.db) as session:
session.add(account)
session.commit()
with Session(testing.db) as session:
a = (
session.query(Account)
.join(Account.widgets)
.filter(Widget.name == "Foo")
.one()
)
self.check_data_fixture(a)
class PlainDeclarativeDataclassesTest(DataclassesTest):
run_setup_classes = "each"
run_setup_mappers = "each"
@classmethod
def setup_classes(cls):
accounts = cls.tables.accounts
widgets = cls.tables.widgets
declarative = declarative_registry().mapped
@declarative
@dataclasses.dataclass
class Widget:
__table__ = widgets
name: Optional[str] = None
__mapper_args__ = dict(
polymorphic_on=widgets.c.type,
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
magic: bool = False
__mapper_args__ = dict(
polymorphic_identity="special",
)
@declarative
@dataclasses.dataclass
class Account:
__table__ = accounts
account_id: int
widgets: List[Widget] = dataclasses.field(default_factory=list)
widget_count: int = dataclasses.field(init=False)
__mapper_args__ = dict(
properties=dict(widgets=relationship("Widget"))
)
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
cls.classes.Account = Account
cls.classes.Widget = Widget
cls.classes.SpecialWidget = SpecialWidget
@classmethod
def setup_mappers(cls):
pass
class FieldEmbeddedDeclarativeDataclassesTest(
fixtures.DeclarativeMappedTest, DataclassesTest
):
@classmethod
def setup_classes(cls):
declarative = cls.DeclarativeBasic.registry.mapped
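        # With __sa_dataclass_metadata_key__ set, declarative looks up each
        # field's Column/relationship in dataclasses.field(metadata={"sa": ...})
        # rather than in a class-level attribute assignment.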
@declarative
@dataclasses.dataclass
class Widget:
__tablename__ = "widgets"
__sa_dataclass_metadata_key__ = "sa"
widget_id = Column(Integer, primary_key=True)
account_id = Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
type = Column(String(30), nullable=False)
name: Optional[str] = dataclasses.field(
default=None,
metadata={"sa": Column(String(30), nullable=False)},
)
__mapper_args__ = dict(
polymorphic_on="type",
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
__sa_dataclass_metadata_key__ = "sa"
magic: bool = dataclasses.field(
default=False, metadata={"sa": Column(Boolean)}
)
__mapper_args__ = dict(
polymorphic_identity="special",
)
@declarative
@dataclasses.dataclass
class Account:
__tablename__ = "accounts"
__sa_dataclass_metadata_key__ = "sa"
account_id: int = dataclasses.field(
metadata={"sa": Column(Integer, primary_key=True)},
)
widgets: List[Widget] = dataclasses.field(
default_factory=list, metadata={"sa": relationship("Widget")}
)
widget_count: int = dataclasses.field(
init=False,
metadata={
"sa": Column("widget_count", Integer, nullable=False)
},
)
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
cls.classes.Account = Account
cls.classes.Widget = Widget
cls.classes.SpecialWidget = SpecialWidget
@classmethod
def setup_mappers(cls):
pass
@classmethod
def define_tables(cls, metadata):
pass
def test_asdict_and_astuple_widget(self):
Widget = self.classes.Widget
widget = Widget("Foo")
eq_(dataclasses.asdict(widget), {"name": "Foo"})
eq_(dataclasses.astuple(widget), ("Foo",))
def test_asdict_and_astuple_special_widget(self):
SpecialWidget = self.classes.SpecialWidget
widget = SpecialWidget("Bar", magic=True)
eq_(dataclasses.asdict(widget), {"name": "Bar", "magic": True})
eq_(dataclasses.astuple(widget), ("Bar", True))
class FieldEmbeddedWMixinTest(FieldEmbeddedDeclarativeDataclassesTest):
@classmethod
def setup_classes(cls):
declarative = cls.DeclarativeBasic.registry.mapped
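        # Like the field-embedded test above, but the surrogate primary key
        # fields come from unmapped dataclass mixins and are picked up on the
        # mapped subclasses.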
@dataclasses.dataclass
class SurrogateWidgetPK:
__sa_dataclass_metadata_key__ = "sa"
widget_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
@declarative
@dataclasses.dataclass
class Widget(SurrogateWidgetPK):
__tablename__ = "widgets"
__sa_dataclass_metadata_key__ = "sa"
account_id = Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
type = Column(String(30), nullable=False)
name: Optional[str] = dataclasses.field(
default=None,
metadata={"sa": Column(String(30), nullable=False)},
)
__mapper_args__ = dict(
polymorphic_on="type",
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
__sa_dataclass_metadata_key__ = "sa"
magic: bool = dataclasses.field(
default=False, metadata={"sa": Column(Boolean)}
)
__mapper_args__ = dict(
polymorphic_identity="special",
)
@dataclasses.dataclass
class SurrogateAccountPK:
__sa_dataclass_metadata_key__ = "sa"
account_id = Column(
"we_dont_want_to_use_this", Integer, primary_key=True
)
@declarative
@dataclasses.dataclass
class Account(SurrogateAccountPK):
__tablename__ = "accounts"
__sa_dataclass_metadata_key__ = "sa"
account_id: int = dataclasses.field(
metadata={"sa": Column(Integer, primary_key=True)},
)
widgets: List[Widget] = dataclasses.field(
default_factory=list, metadata={"sa": relationship("Widget")}
)
widget_count: int = dataclasses.field(
init=False,
metadata={
"sa": Column("widget_count", Integer, nullable=False)
},
)
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
cls.classes.Account = Account
cls.classes.Widget = Widget
cls.classes.SpecialWidget = SpecialWidget
def check_widget_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
(
id_,
name,
) = dataclasses.fields(obj)
eq_(name.name, "name")
eq_(id_.name, "widget_id")
def check_special_widget_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
id_, name, magic = dataclasses.fields(obj)
eq_(id_.name, "widget_id")
eq_(name.name, "name")
eq_(magic.name, "magic")
def test_asdict_and_astuple_widget(self):
Widget = self.classes.Widget
widget = Widget("Foo")
eq_(dataclasses.asdict(widget), {"name": "Foo", "widget_id": None})
eq_(
dataclasses.astuple(widget),
(
None,
"Foo",
),
)
def test_asdict_and_astuple_special_widget(self):
SpecialWidget = self.classes.SpecialWidget
widget = SpecialWidget("Bar", magic=True)
eq_(
dataclasses.asdict(widget),
{"name": "Bar", "magic": True, "widget_id": None},
)
eq_(dataclasses.astuple(widget), (None, "Bar", True))
class FieldEmbeddedMixinWLambdaTest(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
declarative = cls.DeclarativeBasic.registry.mapped
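        # Columns with ForeignKeys and relationships can't be shared between
        # classes, so on mixins the "sa" metadata entry is a lambda that
        # produces a fresh construct per mapped class (like declared_attr).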
@dataclasses.dataclass
class WidgetDC:
__sa_dataclass_metadata_key__ = "sa"
widget_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk on mixin
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
},
)
has_a_default: str = dataclasses.field(
default="some default",
metadata={"sa": lambda: Column(String(50))},
)
@declarative
@dataclasses.dataclass
class Widget(WidgetDC):
__tablename__ = "widgets"
__sa_dataclass_metadata_key__ = "sa"
type = Column(String(30), nullable=False)
name: Optional[str] = dataclasses.field(
default=None,
metadata={"sa": Column(String(30), nullable=False)},
)
__mapper_args__ = dict(
polymorphic_on="type",
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
__tablename__ = "special_widgets"
__sa_dataclass_metadata_key__ = "sa"
special_widget_id: int = dataclasses.field(
init=False,
metadata={
"sa": Column(
ForeignKey("widgets.widget_id"), primary_key=True
)
},
)
magic: bool = dataclasses.field(
default=False, metadata={"sa": Column(Boolean)}
)
__mapper_args__ = dict(
polymorphic_identity="special",
)
@dataclasses.dataclass
class AccountDC:
__sa_dataclass_metadata_key__ = "sa"
# relationship on mixin
widgets: List[Widget] = dataclasses.field(
default_factory=list,
metadata={"sa": lambda: relationship("Widget")},
)
account_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
widget_count: int = dataclasses.field(
init=False,
metadata={
"sa": Column("widget_count", Integer, nullable=False)
},
)
@declarative
class Account(AccountDC):
__tablename__ = "accounts"
__sa_dataclass_metadata_key__ = "sa"
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
@declarative
@dataclasses.dataclass
class User:
__tablename__ = "user"
__sa_dataclass_metadata_key__ = "sa"
user_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk w declared attr on mapped class
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
},
)
cls.classes["Account"] = Account
cls.classes["Widget"] = Widget
cls.classes["User"] = User
cls.classes["SpecialWidget"] = SpecialWidget
def test_setup(self):
Account, Widget, User, SpecialWidget = self.classes(
"Account", "Widget", "User", "SpecialWidget"
)
assert "account_id" in Widget.__table__.c
assert list(Widget.__table__.c.account_id.foreign_keys)[0].references(
Account.__table__
)
assert inspect(Account).relationships.widgets.mapper is inspect(Widget)
assert "account_id" not in SpecialWidget.__table__.c
assert "has_a_default" in Widget.__table__.c
assert "has_a_default" not in SpecialWidget.__table__.c
assert "account_id" in User.__table__.c
assert list(User.__table__.c.account_id.foreign_keys)[0].references(
Account.__table__
)
def test_asdict_and_astuple_special_widget(self):
SpecialWidget = self.classes.SpecialWidget
widget = SpecialWidget(magic=True)
eq_(
dataclasses.asdict(widget),
{
"widget_id": None,
"account_id": None,
"has_a_default": "some default",
"name": None,
"special_widget_id": None,
"magic": True,
},
)
eq_(
dataclasses.astuple(widget),
(None, None, "some default", None, None, True),
)
class FieldEmbeddedMixinWDeclaredAttrTest(FieldEmbeddedMixinWLambdaTest):
@classmethod
def setup_classes(cls):
declarative = cls.DeclarativeBasic.registry.mapped
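        # Identical to the lambda test above, except the deferred constructs
        # are wrapped in declared_attr() explicitly.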
@dataclasses.dataclass
class WidgetDC:
__sa_dataclass_metadata_key__ = "sa"
widget_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk on mixin
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": declared_attr(
lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
)
},
)
has_a_default: str = dataclasses.field(
default="some default",
metadata={"sa": declared_attr(lambda: Column(String(50)))},
)
@declarative
@dataclasses.dataclass
class Widget(WidgetDC):
__tablename__ = "widgets"
__sa_dataclass_metadata_key__ = "sa"
type = Column(String(30), nullable=False)
name: Optional[str] = dataclasses.field(
default=None,
metadata={"sa": Column(String(30), nullable=False)},
)
__mapper_args__ = dict(
polymorphic_on="type",
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
__tablename__ = "special_widgets"
__sa_dataclass_metadata_key__ = "sa"
special_widget_id: int = dataclasses.field(
init=False,
metadata={
"sa": Column(
ForeignKey("widgets.widget_id"), primary_key=True
)
},
)
magic: bool = dataclasses.field(
default=False, metadata={"sa": Column(Boolean)}
)
__mapper_args__ = dict(
polymorphic_identity="special",
)
@dataclasses.dataclass
class AccountDC:
__sa_dataclass_metadata_key__ = "sa"
# relationship on mixin
widgets: List[Widget] = dataclasses.field(
default_factory=list,
metadata={"sa": declared_attr(lambda: relationship("Widget"))},
)
account_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
widget_count: int = dataclasses.field(
init=False,
metadata={
"sa": Column("widget_count", Integer, nullable=False)
},
)
@declarative
class Account(AccountDC):
__tablename__ = "accounts"
__sa_dataclass_metadata_key__ = "sa"
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
@declarative
@dataclasses.dataclass
class User:
__tablename__ = "user"
__sa_dataclass_metadata_key__ = "sa"
user_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk w declared attr on mapped class
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": declared_attr(
lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
)
},
)
cls.classes["Account"] = Account
cls.classes["Widget"] = Widget
cls.classes["User"] = User
cls.classes["SpecialWidget"] = SpecialWidget
class PropagationFromMixinTest(fixtures.TestBase):
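    # Each test supplies a dataclass mixin variant; the run_test fixture maps
    # a small single/joined inheritance hierarchy on top of it and verifies
    # that the mixin's timestamp column propagates into the right table.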
def test_propagate_w_plain_mixin_col(self, run_test):
@dataclasses.dataclass
class CommonMixin:
__sa_dataclass_metadata_key__ = "sa"
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {"mysql_engine": "InnoDB"}
timestamp = Column(Integer)
run_test(CommonMixin)
def test_propagate_w_field_mixin_col(self, run_test):
@dataclasses.dataclass
class CommonMixin:
__sa_dataclass_metadata_key__ = "sa"
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {"mysql_engine": "InnoDB"}
timestamp: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, nullable=False)},
)
run_test(CommonMixin)
def test_propagate_w_field_mixin_col_and_default(self, run_test):
@dataclasses.dataclass
class CommonMixin:
__sa_dataclass_metadata_key__ = "sa"
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {"mysql_engine": "InnoDB"}
timestamp: int = dataclasses.field(
init=False,
default=12,
metadata={"sa": Column(Integer, nullable=False)},
)
run_test(CommonMixin)
@testing.fixture()
def run_test(self):
def go(CommonMixin):
declarative = registry().mapped
@declarative
@dataclasses.dataclass
class BaseType(CommonMixin):
discriminator = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id = Column(Integer, primary_key=True)
value = Column(Integer())
@declarative
@dataclasses.dataclass
class Single(BaseType):
                __tablename__ = None  # single-table inheritance: reuse the base class's table
__mapper_args__ = dict(polymorphic_identity="type1")
@declarative
@dataclasses.dataclass
class Joined(BaseType):
__mapper_args__ = dict(polymorphic_identity="type2")
id = Column(
Integer, ForeignKey("basetype.id"), primary_key=True
)
eq_(BaseType.__table__.name, "basetype")
eq_(
list(BaseType.__table__.c.keys()),
["type", "id", "value", "timestamp"],
)
eq_(BaseType.__table__.kwargs, {"mysql_engine": "InnoDB"})
assert Single.__table__ is BaseType.__table__
eq_(Joined.__table__.name, "joined")
eq_(list(Joined.__table__.c.keys()), ["id"])
eq_(Joined.__table__.kwargs, {"mysql_engine": "InnoDB"})
yield go
clear_mappers()
class PropagationFromAbstractTest(fixtures.TestBase):
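    # Same propagation checks as above, but starting from an unmapped
    # dataclass base class instead of a mixin.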
def test_propagate_w_plain_mixin_col(self, run_test):
@dataclasses.dataclass
class BaseType:
__sa_dataclass_metadata_key__ = "sa"
__table_args__ = {"mysql_engine": "InnoDB"}
discriminator: str = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id: int = Column(Integer, primary_key=True)
value: int = Column(Integer())
timestamp: int = Column(Integer)
run_test(BaseType)
def test_propagate_w_field_mixin_col(self, run_test):
@dataclasses.dataclass
class BaseType:
__sa_dataclass_metadata_key__ = "sa"
__table_args__ = {"mysql_engine": "InnoDB"}
discriminator: str = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id: int = Column(Integer, primary_key=True)
value: int = Column(Integer())
timestamp: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, nullable=False)},
)
run_test(BaseType)
def test_propagate_w_field_mixin_col_and_default(self, run_test):
@dataclasses.dataclass
class BaseType:
__sa_dataclass_metadata_key__ = "sa"
__table_args__ = {"mysql_engine": "InnoDB"}
discriminator: str = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id: int = Column(Integer, primary_key=True)
value: int = Column(Integer())
timestamp: int = dataclasses.field(
init=False,
default=None,
metadata={"sa": Column(Integer, nullable=False)},
)
run_test(BaseType)
@testing.fixture()
def run_test(self):
def go(BaseType):
declarative = registry().mapped
@declarative
@dataclasses.dataclass
class Single(BaseType):
__tablename__ = "single"
__mapper_args__ = dict(polymorphic_identity="type1")
@declarative
@dataclasses.dataclass
class Joined(Single):
__tablename__ = "joined"
__mapper_args__ = dict(polymorphic_identity="type2")
id = Column(Integer, ForeignKey("single.id"), primary_key=True)
eq_(Single.__table__.name, "single")
eq_(
list(Single.__table__.c.keys()),
["type", "id", "value", "timestamp"],
)
eq_(Single.__table__.kwargs, {"mysql_engine": "InnoDB"})
eq_(Joined.__table__.name, "joined")
eq_(list(Joined.__table__.c.keys()), ["id"])
eq_(Joined.__table__.kwargs, {"mysql_engine": "InnoDB"})
yield go
clear_mappers()
|
|
#!/usr/bin/env python
# Copyright (c) 2015, Job Snijders
# Copyright (c) 2015, NORDUnet A/S
#
# This file is part of IRR Explorer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
SOURCE = 'source'
RIPE = 'ripe'
BGP = 'bgp'
# IRR_DBS = ['afrinic', 'altdb', 'apnic', 'arin', 'bboi', 'bell', 'gt', 'jpirr', 'level3', 'nttcom', 'radb', 'rgnet', 'savvis', 'tc', 'ripe']
class NoPrefixError(Exception):
pass
def add_prefix_advice(prefixes):
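    # Annotate each prefix dict with an 'advice' string and a 'label' severity
    # based on how its IRR registrations compare with what is seen in BGP.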
    # possible 'label' values (Bootstrap label styles): default, primary, success, info, warning, danger
for pfx, pfx_data in prefixes.items():
print 'Prefix: %s, data: %s' % (pfx, pfx_data)
pfx_source = pfx_data[SOURCE]
anywhere = set()
for entries in pfx_source.values():
for entry in entries:
anywhere.add(entry)
anywhere = list(anywhere)
anywhere_not_ripe = set()
for db, entries in pfx_source.items():
if db != RIPE:
for entry in entries:
anywhere_not_ripe.add(entry)
anywhere_not_ripe = list(anywhere_not_ripe)
        #print ' IRR origins:', anywhere
        #print ' IRR origins % ripe:', anywhere_not_ripe
        if BGP not in pfx_data:
bgp_origin = None
else:
            # afaict this should never happen, at least not as long as we only have a single table
            if len(pfx_data[BGP]) > 1:
                print 'Multiple BGP sources:', pfx_data[BGP], 'only using first origin'
bgp_origin = list(pfx_data[BGP])[0]
# check if this is rfc managed space
managed = False
for source in pfx_data:
if source.endswith('_managed') and source.lower() != 'ripe_managed':
rfc_source = source.rsplit('_',1)[0]
pfx_data['advice'] = "Prefix is %s space. Drunk engineer." % rfc_source
pfx_data['label'] = "warning"
managed = True
break
if managed:
continue # don't bother checking anything else
if 'ripe_managed' in pfx_data:
if 'ripe' in pfx_source:
if bgp_origin and bgp_origin in pfx_source['ripe']:
if len(anywhere) == 1: # only ripe as origin
pfx_data['advice'] = "Perfect"
pfx_data['label'] = "success"
elif [bgp_origin] == anywhere:
pfx_data['advice'] = "Proper RIPE DB object, but foreign objects also exist, consider remoing these."
pfx_data['label'] = "warning"
elif anywhere_not_ripe:
pfx_data['advice'] = "Proper RIPE DB object, but foreign objects also exist, consider removing these. BGP origin does not match all IRR entries."
pfx_data['label'] = "danger"
elif len(anywhere) > 1:
pfx_data['advice'] = "Multiple entries exists in RIPE DB, with different origin. Consider removing conflicting entries."
pfx_data['label'] = "warning"
else:
pfx_data['advice'] = "Looks good, but multiple entries exists in RIPE DB"
pfx_data['label'] = "success"
elif bgp_origin and pfx_source:
pfx_data['advice'] = "Prefix is in DFZ, but registered with wrong origin in RIPE!"
pfx_data['label'] = "danger"
else: # no bgp origin
                    # same as the final else clause; not sure if this could be made a bit better
                    if ':' in pfx:
                        pfx_data['advice'] = "Looks like the network is not announcing its registered v6 prefix yet"
else:
pfx_data['advice'] = "Not seen in BGP, but (legacy?) route-objects exist, consider clean-up"
pfx_data['label'] = "info"
else: # no ripe registration
if bgp_origin:
                # branch on whether the BGP origin AS matches the IRR entries
                if anywhere: # could use anywhere_not_ripe - result would be the same
if [bgp_origin] == anywhere:
pfx_data['advice'] = "Prefix is in DFZ and has an IRR record, but NOT in RIPE! BGP origin matches IRR entries."
pfx_data['label'] = "warning"
else:
pfx_data['advice'] = "Prefix is in DFZ and has an IRR record, but NOT in RIPE! BGP origin does not match all IRR entries."
pfx_data['label'] = "danger"
else:
pfx_data['advice'] = "Prefix is in DFZ, but NOT registered in any IRR and should go into RIPE!"
pfx_data['label'] = "danger"
else:
pfx_data['advice'] = "Route objects in foreign registries exist, but no BGP origin. Consider moving IRR object to RIPE DB or deleting them."
pfx_data['label'] = "warning"
elif bgp_origin: # not ripe managed, but have bgp_origin
if bgp_origin in anywhere:
if len(anywhere) == 1:
pfx_data['advice'] = "Looks good: BGP origin consistent with AS in route-objects"
pfx_data['label'] = "success"
else:
pfx_data['advice'] = "Multiple route-object exist with different origins"
pfx_data['label'] = 'warning'
else:
pfx_data['advice'] = "Prefix in DFZ, but no route-object with correct origin anywhere"
pfx_data['label'] = "danger"
else: # not ripe managed, no bgp origin
if ':' in pfx:
pfx_data['advice'] = "Look like network is not announcing registered v6 prefix yet"
else:
pfx_data['advice'] = "Not seen in BGP, but (legacy?) route-objects exist, consider clean-up"
pfx_data['label'] = "info"
return prefixes
def prefix(pgdb, prefix):
"""
- find least specific
- search in BGP for more specifics
- search in IRR for more specifics
- check all prefixes whether they are RIPE managed or not
- return dict
"""
t_start = time.time()
print 'Prefix report: %s' % (prefix,)
routes = pgdb.query_prefix(prefix)
prefixes = _build_prefix_dict(routes)
# avoid spamming log too much
if len(prefixes) < 20:
print 'Prefixes:', prefixes.keys()
else:
print 'Prefixes: <%i>' % len(prefixes)
    # TODO: if we find any prefixes larger than the inputted one, we should find the prefixes covered by those prefixes:
    # go through the prefixes, find the shortest one, and if it differs from the inputted one, do another search
add_prefix_advice(prefixes)
print 'Advice:'
for p,d in prefixes.items():
print '%s: %s' % (p,d['advice'])
# Move source out into top dict, to fit with the datatables stuff
for pfx_data in prefixes.values():
pfx_data.update(pfx_data.pop(SOURCE))
t_delta = time.time() - t_start
print 'Time for prefix report for %s: %s\n' % (prefix, round(t_delta,2))
return prefixes
def _build_prefix_dict(db_result):
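    # Shape the rows into {route: {'source': {db: [asn, ...]}, '<x>_managed': True}};
    # the BGP pseudo-source is hoisted out of 'source' at the end.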
result = {}
for route, asn, source, managed in db_result:
#print 'BDP', route, asn, source, managed
ps = result.setdefault(route, {}).setdefault(SOURCE, {})
        if asn not in ps.get(source, []): # we can get duplicates due to htj's sql limitations
ps.setdefault(source, []).append(asn)
if managed:
result[route][managed] = True
# move bgp out from sources and into top-level dict for the prefix
for data in result.values():
if BGP in data[SOURCE]:
data[BGP] = data[SOURCE].pop(BGP)
return result
def as_prefixes(pgdb, as_number):
    if type(as_number) is not int:
raise ValueError('Invalid argument provided for as number')
print 'AS Prefix Report: ', as_number
t_start = time.time()
prefixes = pgdb.query_as(as_number)
# do deep as query if prefix set is sufficiently small to do it fast
# we could probably go to ~1000 here
if len(prefixes) < 1000:
print 'Performing deep query for AS', as_number
prefixes = pgdb.query_as_deep(as_number)
result = _build_prefix_dict(prefixes)
add_prefix_advice(result)
print 'Advice:'
for p,d in result.items():
print '%s: %s' % (p,d['advice'])
    # OK, this is not how I want to do things, but I cannot figure out the JavaScript stuff
for pfx_data in result.values():
pfx_data.update(pfx_data.pop(SOURCE))
t_delta = time.time() - t_start
print 'Time for AS prefixes for %s: %s' % (as_number, round(t_delta,2))
print
return result
def macro_expand(pgdb, as_macro):
print 'Macro Expand Report:', as_macro
t_start = time.time()
macros = pgdb.query_as_macro_expand(as_macro)
result = []
for macro, source, depth, path, members in macros:
e = { 'as_macro' : macro,
'source' : source,
'depth' : depth,
'path' : path,
'members' : members
}
result.append(e)
t_delta = time.time() - t_start
print 'Time for macro expand report for %s: %s' % (as_macro, round(t_delta,2))
print
return result
def macro_contain(pgdb, as_object):
print 'Macro Contains Report:', as_object
t_start = time.time()
macros = pgdb.query_as_contain(as_object)
result = {}
for macro, source in macros:
result.setdefault(macro, {})[source] = True
t_delta = time.time() - t_start
print 'Time for as contains report for %s: %s' % (as_object, round(t_delta,2))
print
return result
|
|
#
# Copyright (c) 2014 Tom Carroll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Created on 7 Jun 2014
'''
from actuator import (MultiComponent,
MultiComponentGroup, ComponentGroup, ctxt)
from actuator.infra import InfraModel, StaticServer, MultiResource
from actuator.provisioners.example_resources import Server
from actuator.modeling import CallContext
from actuator.namespace import Var, NamespaceModel, with_variables, Role,\
MultiRole
def setup():
pass
def test01():
from actuator.modeling import _ComputeModelComponents
class NoCompSource(_ComputeModelComponents):
pass
cs = NoCompSource()
try:
_ = cs._comp_source()
assert False, "Non-overridden _comp_source() should have raised"
except TypeError, e:
assert "Derived class must implement" in e.message
def test02():
try:
_ = ComponentGroup("oopsGroup", server=Server("server", mem="8GB"), oops="not me")
assert False, "Bad arg to ComponentGroup not caught"
except TypeError, e:
assert "isn't a kind of AbstractModelingEntity".lower() in e.message.lower()
def test03():
ce = ctxt.one.two.three
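    # attribute paths are recorded in reverse order, most recent access first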
assert list(ce._path) == ["three", "two", "one"]
def test04():
ce = ctxt.model.infra.grid[0]
path = list(ce._path[1:])
ki = ce._path[0]
assert [ki.key] + path == ["0", "grid", "infra", "model"]
def test05():
class Infra(InfraModel):
grid = MultiComponent(Server("grid", mem="8GB"))
inst = Infra("iter")
for i in range(3):
_ = inst.grid[i]
assert set(inst.grid) == set(["0", "1", "2"])
def test06():
class Infra(InfraModel):
grid = MultiComponent(Server("grid", mem="8GB"))
inst = Infra("iter")
for i in range(3):
_ = inst.grid[i]
nada = "nada"
assert inst.grid.get("10", default=nada) == nada
def test07():
class Infra(InfraModel):
grid = MultiComponent(Server("grid", mem="8GB"))
inst = Infra("iter")
assert not inst.grid
class FakeReference(object):
"""
This class is used to act like a component in a model simply to satisfy the
interface contract of AbstractModelReference in order to construct
CallContext objects in tests below
"""
def __init__(self, name):
self._name = name
def test08():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
qexp = Infra.q.clusters.all().workers
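    # 2 clusters x 10 workers each, so the query should yield 20 components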
for i in range(2):
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 20
def test09():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
qexp = Infra.q.clusters.all().workers.keyin([0, 1, 2, 3, 4])
for i in range(2):
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 10
def test10():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
qexp = Infra.q.clusters.match("(NY|LN)").workers
for i in ["NY", "LN", "SG", "TK", "ZU"]:
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 20
def test11():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
qexp = Infra.q.clusters.no_match("(NY|LN)").workers
for i in ["NY", "LN", "SG", "TK", "ZU"]:
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 30
def test12():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
def evens_only(key):
return int(key) % 2 == 0
qexp = Infra.q.clusters.workers.pred(evens_only)
for i in ["NY", "LN", "SG", "TK", "ZU"]:
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 25
def test13():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
def evens_only(key):
return int(key) % 2 == 0
qexp = Infra.q.clusters.match("(LN|NY)").workers.pred(evens_only)
for i in ["NY", "LN", "SG", "TK", "ZU"]:
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 10
def test14():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
cell=ComponentGroup("cell",
foreman=Server("foreman", mem="8"),
workers=MultiComponent(Server("worker", mem="8GB"))
)
)
infra = Infra("infra")
def evens_only(key):
return int(key) % 2 == 0
qexp = Infra.q.clusters.match("(LN|NY)").cell.workers.pred(evens_only)
for i in ["NY", "LN", "SG", "TK", "ZU"]:
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.cell.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 10
def test15():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
qexp = Infra.q.union(Infra.q.clusters.match("(NY|LN)").workers,
Infra.q.clusters.key("SG").leader)
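    # union of the NY/LN workers (2 x 10) plus the single SG leader => 21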
for i in ["NY", "LN", "SG", "TK", "ZU"]:
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 21
def test16():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
def evens_only(key):
return int(key) % 2 == 0
def lt_seven(key):
return int(key) < 7
qexp = Infra.q.clusters.workers.pred(evens_only).pred(lt_seven)
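    # chained predicates AND together: keys 0, 2, 4, 6 per cluster x 5 clusters = 20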
for i in ["NY", "LN", "SG", "TK", "ZU"]:
cluster = infra.clusters[i]
for j in range(10):
_ = cluster.workers[j]
ctxt = CallContext(infra, FakeReference("wibble"))
result = qexp(ctxt)
assert len(result) == 20
def test17():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
try:
_ = Infra.q.cluster.workers
assert False, "This should have complained about 'cluster' not being an attribute"
except AttributeError, e:
assert "cluster" in e.message.lower()
def test18():
class Infra(InfraModel):
clusters = MultiComponentGroup("cluster",
leader=Server("leader", mem="8GB"),
workers=MultiComponent(Server("worker", mem="8GB")))
infra = Infra("infra")
assert infra.nexus
def test19():
class InfraIPTest(InfraModel):
s = StaticServer("sommat", "127.0.0.1")
infra = InfraIPTest("test")
infra.s.fix_arguments()
class IPTest(NamespaceModel):
with_variables(Var("ADDY", ctxt.model.infra.s.get_ip))
r = Role("bogus")
ns = IPTest()
ns.set_infra_model(infra)
v, o = ns.find_variable("ADDY")
assert v.get_value(ns.r) == "127.0.0.1"
def host_list(ctx_exp, sep_char=" "):
def host_list_inner(ctx):
hlist = list(ctx_exp(ctx))
#this next line is needed as the framework isn't doing the
#arg fixing for us
_ = [h.host_ref.fix_arguments() for h in hlist]
ip_list = [h.host_ref.get_ip() for h in hlist]
return sep_char.join(ip_list)
return host_list_inner
def test20():
class IPFactory(object):
def __init__(self):
self.host = 0
def __call__(self, ctx=None):
self.host += 1
return "192.168.1.%d" % self.host
ipfactory = IPFactory()
class Infra20(InfraModel):
slaves = MultiResource(StaticServer("slave", ipfactory))
infra = Infra20("i20")
class Namespace20(NamespaceModel):
with_variables(Var("EXPR", host_list(ctxt.model.q.s)))
s = MultiRole(Role("dude", host_ref=Infra20.slaves[ctxt.name]))
ns = Namespace20()
for i in range(5):
ns.s[i].fix_arguments()
v, o = ns.s[0].find_variable("EXPR")
assert len(v.get_value(ns.s[0]).split(" ")) == 5
def do_all():
setup()
test19()
for k, v in globals().items():
if k.startswith("test") and callable(v):
v()
if __name__ == "__main__":
do_all()
|
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import optparse
import sys
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST
from webkitpy.port import builders
from webkitpy.port import factory
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
# FIXME: Should TestResultWriter know how to compute this string?
def _baseline_name(fs, test_name, suffix):
return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix
class AbstractRebaseliningCommand(Command):
# not overriding execute() - pylint: disable=W0223
move_overwritten_baselines_option = optparse.make_option("--move-overwritten-baselines", action="store_true", default=False,
help="Move overwritten baselines elsewhere in the baseline path. This is for bringing up new ports.")
no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
'You can use "webkit-patch optimize-baselines" to optimize separately.'))
platform_options = factory.platform_options(use_globs=True)
results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")
suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
help="Comma-separated-list of file types to rebaseline")
def __init__(self, options=None):
super(AbstractRebaseliningCommand, self).__init__(options=options)
self._baseline_suffix_list = BASELINE_SUFFIX_LIST
class RebaselineTest(AbstractRebaseliningCommand):
name = "rebaseline-test-internal"
help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."
def __init__(self):
super(RebaselineTest, self).__init__(options=[
self.no_optimize_option,
self.results_directory_option,
self.suffixes_option,
optparse.make_option("--builder", help="Builder to pull new baselines from"),
optparse.make_option("--move-overwritten-baselines-to", action="append", default=[],
help="Platform to move existing baselines to before rebaselining. This is for bringing up new ports."),
optparse.make_option("--test", help="Test to rebaseline"),
])
self._scm_changes = {'add': []}
def _results_url(self, builder_name):
return self._tool.buildbot.builder_with_name(builder_name).latest_layout_test_results_url()
def _baseline_directory(self, builder_name):
port = self._tool.port_factory.get_from_builder_name(builder_name)
override_dir = builders.rebaseline_override_dir(builder_name)
if override_dir:
return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
return port.baseline_version_dir()
def _copy_existing_baseline(self, move_overwritten_baselines_to, test_name, suffix):
old_baselines = []
new_baselines = []
# Need to gather all the baseline paths before modifying the filesystem since
# the modifications can affect the results of port.expected_filename.
for platform in move_overwritten_baselines_to:
port = self._tool.port_factory.get(platform)
old_baseline = port.expected_filename(test_name, "." + suffix)
if not self._tool.filesystem.exists(old_baseline):
_log.debug("No existing baseline for %s." % test_name)
continue
new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
if self._tool.filesystem.exists(new_baseline):
_log.debug("Existing baseline at %s, not copying over it." % new_baseline)
continue
old_baselines.append(old_baseline)
new_baselines.append(new_baseline)
for i in range(len(old_baselines)):
old_baseline = old_baselines[i]
new_baseline = new_baselines[i]
_log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
self._tool.filesystem.copyfile(old_baseline, new_baseline)
if not self._tool.scm().exists(new_baseline):
self._add_to_scm(new_baseline)
def _save_baseline(self, data, target_baseline):
if not data:
return
filesystem = self._tool.filesystem
filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
filesystem.write_binary_file(target_baseline, data)
if not self._tool.scm().exists(target_baseline):
self._add_to_scm(target_baseline)
def _add_to_scm(self, path):
self._scm_changes['add'].append(path)
def _update_expectations_file(self, builder_name, test_name):
port = self._tool.port_factory.get_from_builder_name(builder_name)
# Since rebaseline-test-internal can be called multiple times in parallel,
# we need to ensure that we're not trying to update the expectations file
# concurrently as well.
# FIXME: We should rework the code to not need this; maybe just download
# the files in parallel and rebaseline local files serially?
try:
path = port.path_to_test_expectations_file()
lock = self._tool.make_file_lock(path + '.lock')
lock.acquire_lock()
expectations = TestExpectations(port, include_generic=False, include_overrides=False)
for test_configuration in port.all_test_configurations():
if test_configuration.version == port.test_configuration().version:
expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)
self._tool.filesystem.write_text_file(path, expectationsString)
finally:
lock.release_lock()
def _test_root(self, test_name):
return self._tool.filesystem.splitext(test_name)[0]
def _file_name_for_actual_result(self, test_name, suffix):
return "%s-actual.%s" % (self._test_root(test_name), suffix)
def _file_name_for_expected_result(self, test_name, suffix):
return "%s-expected.%s" % (self._test_root(test_name), suffix)
def _rebaseline_test(self, builder_name, test_name, move_overwritten_baselines_to, suffix, results_url):
baseline_directory = self._baseline_directory(builder_name)
source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))
if move_overwritten_baselines_to:
self._copy_existing_baseline(move_overwritten_baselines_to, test_name, suffix)
_log.debug("Retrieving %s." % source_baseline)
self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline)
def _rebaseline_test_and_update_expectations(self, options):
if options.results_directory:
results_url = 'file://' + options.results_directory
else:
results_url = self._results_url(options.builder)
self._baseline_suffix_list = options.suffixes.split(',')
for suffix in self._baseline_suffix_list:
self._rebaseline_test(options.builder, options.test, options.move_overwritten_baselines_to, suffix, results_url)
self._update_expectations_file(options.builder, options.test)
def execute(self, options, args, tool):
self._rebaseline_test_and_update_expectations(options)
print json.dumps(self._scm_changes)
class OptimizeBaselines(AbstractRebaseliningCommand):
name = "optimize-baselines"
help_text = "Reshuffles the baselines for the given tests to use as litte space on disk as possible."
argument_names = "TEST_NAMES"
def __init__(self):
super(OptimizeBaselines, self).__init__(options=[self.suffixes_option] + self.platform_options)
def _optimize_baseline(self, optimizer, test_name):
for suffix in self._baseline_suffix_list:
baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
if not optimizer.optimize(baseline_name):
print "Heuristics failed to optimize %s" % baseline_name
def execute(self, options, args, tool):
self._baseline_suffix_list = options.suffixes.split(',')
port_names = tool.port_factory.all_port_names(options.platform)
if not port_names:
print "No port names match '%s'" % options.platform
return
optimizer = BaselineOptimizer(tool, port_names)
port = tool.port_factory.get(port_names[0])
for test_name in port.tests(args):
_log.info("Optimizing %s" % test_name)
self._optimize_baseline(optimizer, test_name)
class AnalyzeBaselines(AbstractRebaseliningCommand):
name = "analyze-baselines"
help_text = "Analyzes the baselines for the given tests and prints results that are identical."
argument_names = "TEST_NAMES"
def __init__(self):
super(AnalyzeBaselines, self).__init__(options=[
self.suffixes_option,
optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
] + self.platform_options)
self._optimizer_class = BaselineOptimizer # overridable for testing
self._baseline_optimizer = None
self._port = None
def _write(self, msg):
print msg
def _analyze_baseline(self, options, test_name):
for suffix in self._baseline_suffix_list:
baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
if results_by_directory:
self._write("%s:" % baseline_name)
self._baseline_optimizer.write_by_directory(results_by_directory, self._write, " ")
elif options.missing:
self._write("%s: (no baselines found)" % baseline_name)
def execute(self, options, args, tool):
self._baseline_suffix_list = options.suffixes.split(',')
port_names = tool.port_factory.all_port_names(options.platform)
if not port_names:
print "No port names match '%s'" % options.platform
return
self._baseline_optimizer = self._optimizer_class(tool, port_names)
self._port = tool.port_factory.get(port_names[0])
for test_name in self._port.tests(args):
self._analyze_baseline(options, test_name)
class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
# not overriding execute() - pylint: disable=W0223
def _run_webkit_patch(self, args, verbose):
try:
verbose_args = ['--verbose'] if verbose else []
stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
for line in stderr.splitlines():
print >> sys.stderr, line
except ScriptError, e:
_log.error(e)
def _builders_to_fetch_from(self, builders_to_check):
# This routine returns the subset of builders that will cover all of the baseline search paths
# used in the input list. In particular, if the input list contains both Release and Debug
# versions of a configuration, we *only* return the Release version (since we don't save
# debug versions of baselines).
release_builders = set()
debug_builders = set()
builders_to_fallback_paths = {}
for builder in builders_to_check:
port = self._tool.port_factory.get_from_builder_name(builder)
if port.test_configuration().build_type == 'Release':
release_builders.add(builder)
else:
debug_builders.add(builder)
for builder in list(release_builders) + list(debug_builders):
port = self._tool.port_factory.get_from_builder_name(builder)
fallback_path = port.baseline_search_path()
if fallback_path not in builders_to_fallback_paths.values():
builders_to_fallback_paths[builder] = fallback_path
return builders_to_fallback_paths.keys()
def _rebaseline_commands(self, test_list, options):
path_to_webkit_patch = self._tool.path()
cwd = self._tool.scm().checkout_root
commands = []
for test in test_list:
for builder in self._builders_to_fetch_from(test_list[test]):
suffixes = ','.join(test_list[test][builder])
cmd_line = [path_to_webkit_patch, 'rebaseline-test-internal', '--suffixes', suffixes, '--builder', builder, '--test', test]
if options.move_overwritten_baselines:
move_overwritten_baselines_to = builders.move_overwritten_baselines_to(builder)
for platform in move_overwritten_baselines_to:
cmd_line.extend(['--move-overwritten-baselines-to', platform])
if options.results_directory:
cmd_line.extend(['--results-directory', options.results_directory])
if options.verbose:
cmd_line.append('--verbose')
commands.append(tuple([cmd_line, cwd]))
return commands
def _files_to_add(self, command_results):
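        # Each rebaseline-test-internal invocation prints a JSON object with an
        # 'add' list (see RebaselineTest.execute); collect those paths here.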
files_to_add = set()
for output in [result[1].split('\n') for result in command_results]:
file_added = False
for line in output:
try:
if line:
files_to_add.update(json.loads(line)['add'])
file_added = True
except ValueError:
_log.debug('"%s" is not a JSON object, ignoring' % line)
if not file_added:
_log.debug('Could not add file based off output "%s"' % output)
return list(files_to_add)
def _optimize_baselines(self, test_list, verbose=False):
# We don't run this in parallel because modifying the SCM in parallel is unreliable.
for test in test_list:
all_suffixes = set()
for builder in self._builders_to_fetch_from(test_list[test]):
all_suffixes.update(test_list[test][builder])
# FIXME: We should propagate the platform options as well.
self._run_webkit_patch(['optimize-baselines', '--suffixes', ','.join(all_suffixes), test], verbose)
def _rebaseline(self, options, test_list):
for test, builders_to_check in sorted(test_list.items()):
_log.info("Rebaselining %s" % test)
for builder, suffixes in sorted(builders_to_check.items()):
_log.debug(" %s: %s" % (builder, ",".join(suffixes)))
commands = self._rebaseline_commands(test_list, options)
command_results = self._tool.executive.run_in_parallel(commands)
log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
for line in log_output.split('\n'):
if line:
print >> sys.stderr, line # FIXME: Figure out how to log properly.
files_to_add = self._files_to_add(command_results)
if files_to_add:
self._tool.scm().add_list(list(files_to_add))
if options.optimize:
self._optimize_baselines(test_list, options.verbose)
class RebaselineJson(AbstractParallelRebaselineCommand):
name = "rebaseline-json"
help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."
    def __init__(self):
super(RebaselineJson, self).__init__(options=[
self.move_overwritten_baselines_option,
self.no_optimize_option,
self.results_directory_option,
])
def execute(self, options, args, tool):
self._rebaseline(options, json.loads(sys.stdin.read()))
class RebaselineExpectations(AbstractParallelRebaselineCommand):
name = "rebaseline-expectations"
help_text = "Rebaselines the tests indicated in TestExpectations."
def __init__(self):
super(RebaselineExpectations, self).__init__(options=[
self.move_overwritten_baselines_option,
self.no_optimize_option,
] + self.platform_options)
self._test_list = None
def _update_expectations_files(self, port_name):
port = self._tool.port_factory.get(port_name)
expectations = TestExpectations(port)
for path in port.expectations_dict():
if self._tool.filesystem.exists(path):
self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures(), path))
def _tests_to_rebaseline(self, port):
tests_to_rebaseline = {}
expectations = TestExpectations(port, include_overrides=True)
for test in expectations.get_rebaselining_failures():
tests_to_rebaseline[test] = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
return tests_to_rebaseline
def _add_tests_to_rebaseline_for_port(self, port_name):
builder_name = builders.builder_name_for_port_name(port_name)
if not builder_name:
return
tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()
if tests:
_log.info("Retrieving results for %s from %s." % (port_name, builder_name))
for test_name, suffixes in tests:
_log.info(" %s (%s)" % (test_name, ','.join(suffixes)))
if test_name not in self._test_list:
self._test_list[test_name] = {}
self._test_list[test_name][builder_name] = suffixes
def execute(self, options, args, tool):
options.results_directory = None
self._test_list = {}
port_names = tool.port_factory.all_port_names(options.platform)
for port_name in port_names:
self._add_tests_to_rebaseline_for_port(port_name)
if not self._test_list:
_log.warning("Did not find any tests marked Rebaseline.")
return
self._rebaseline(options, self._test_list)
for port_name in port_names:
self._update_expectations_files(port_name)
class Rebaseline(AbstractParallelRebaselineCommand):
name = "rebaseline"
help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
argument_names = "[TEST_NAMES]"
def __init__(self):
super(Rebaseline, self).__init__(options=[
self.move_overwritten_baselines_option,
self.no_optimize_option,
# FIXME: should we support the platform options in addition to (or instead of) --builders?
self.suffixes_option,
optparse.make_option("--builders", default=None, action="append", help="Comma-separated-list of builders to pull new baselines from (can also be provided multiple times)"),
])
def _builders_to_pull_from(self):
        webkit_buildbot_builder_names = list(builders.all_builder_names())
titles = ["build.webkit.org bots"]
lists = [webkit_buildbot_builder_names]
chosen_names = self._tool.user.prompt_with_multiple_lists("Which builder to pull results from:", titles, lists, can_choose_multiple=True)
return [self._builder_with_name(name) for name in chosen_names]
def _builder_with_name(self, name):
return self._tool.buildbot.builder_with_name(name)
def _tests_to_update(self, builder):
failing_tests = builder.latest_layout_test_results().tests_matching_failure_types([test_failures.FailureTextMismatch])
return self._tool.user.prompt_with_list("Which test(s) to rebaseline for %s:" % builder.name(), failing_tests, can_choose_multiple=True)
def execute(self, options, args, tool):
options.results_directory = None
if options.builders:
builders_to_check = []
for builder_names in options.builders:
builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
else:
builders_to_check = self._builders_to_pull_from()
test_list = {}
suffixes_to_update = options.suffixes.split(",")
for builder in builders_to_check:
tests = args or self._tests_to_update(builder)
for test in tests:
if test not in test_list:
test_list[test] = {}
test_list[test][builder.name()] = suffixes_to_update
if options.verbose:
_log.debug("rebaseline-json: " + str(test_list))
self._rebaseline(options, test_list)
|
|
# stdlib
import copy
import inspect
from itertools import product
import logging
import os
from pprint import pformat
import signal
import sys
import time
import traceback
import unittest
# project
from checks import AgentCheck
from config import get_checksd_path
try:
from util import get_hostname, get_os
except ImportError:
from utils.hostname import get_hostname
from utils.platform import get_os
from utils.debug import get_check # noqa - FIXME 5.5.0 AgentCheck tests should not use this
log = logging.getLogger('tests')
def get_check_class(name):
checksd_path = get_checksd_path(get_os())
if checksd_path not in sys.path:
sys.path.append(checksd_path)
check_module = __import__(name)
check_class = None
classes = inspect.getmembers(check_module, inspect.isclass)
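    # Prefer the most derived check class: keep scanning past classes that
    # inherit AgentCheck directly, but stop at the first deeper subclass.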
for _, clsmember in classes:
if clsmember == AgentCheck:
continue
if issubclass(clsmember, AgentCheck):
check_class = clsmember
if AgentCheck in clsmember.__bases__:
continue
else:
break
return check_class
def load_class(check_name, class_name):
"""
Retrieve a class with the given name within the given check module.
"""
checksd_path = get_checksd_path(get_os())
if checksd_path not in sys.path:
sys.path.append(checksd_path)
check_module = __import__(check_name)
classes = inspect.getmembers(check_module, inspect.isclass)
for name, clsmember in classes:
if name == class_name:
return clsmember
raise Exception(u"Unable to import class {0} from the check module.".format(class_name))
def load_check(name, config, agentConfig):
checksd_path = get_checksd_path(get_os())
if checksd_path not in sys.path:
sys.path.append(checksd_path)
check_module = __import__(name)
check_class = None
classes = inspect.getmembers(check_module, inspect.isclass)
for _, clsmember in classes:
if clsmember == AgentCheck:
continue
if issubclass(clsmember, AgentCheck):
check_class = clsmember
if AgentCheck in clsmember.__bases__:
continue
else:
break
if check_class is None:
raise Exception("Unable to import check %s. Missing a class that inherits AgentCheck" % name)
init_config = config.get('init_config', {})
instances = config.get('instances')
agentConfig['checksd_hostname'] = get_hostname(agentConfig)
# init the check class
try:
return check_class(name, init_config=init_config, agentConfig=agentConfig, instances=instances)
except TypeError as e:
raise Exception("Check is using old API, {0}".format(e))
except Exception:
raise
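# Minimal usage sketch for load_check() (hypothetical check name and config):
#   config = {'init_config': {}, 'instances': [{'host': 'localhost'}]}
#   check = load_check('my_check', config, {'version': '0.1', 'api_key': 'dummy'})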
def kill_subprocess(process_obj):
try:
process_obj.terminate()
except AttributeError:
# py < 2.6 doesn't support process.terminate()
if get_os() == 'windows':
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False,
process_obj.pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
os.kill(process_obj.pid, signal.SIGKILL)
class Fixtures(object):
@staticmethod
def integration_name():
for stack in inspect.stack():
# stack[1] is the file path
file_name = os.path.basename(stack[1])
if 'test_' in file_name:
                # File names look like 'test_<name>.py': strip the 'test_'
                # prefix (5 chars) and the '.py' suffix (3 chars).
return file_name[5:-3]
raise Exception('No integration test file in stack')
@staticmethod
def directory():
return os.path.join(os.path.dirname(__file__), 'fixtures',
Fixtures.integration_name())
@staticmethod
def file(file_name):
return os.path.join(Fixtures.directory(), file_name)
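    # Example (hypothetical integration): called from tests/test_nginx.py,
    # Fixtures.file('nginx.conf') resolves to
    # <this dir>/fixtures/nginx/nginx.conf.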
@staticmethod
def read_file(file_name, string_escape=True):
with open(Fixtures.file(file_name)) as f:
contents = f.read()
if string_escape:
contents = contents.decode('string-escape')
return contents.decode("utf-8")
class AgentCheckTest(unittest.TestCase):
DEFAULT_AGENT_CONFIG = {
'version': '0.1',
'api_key': 'toto'
}
def __init__(self, *args, **kwargs):
super(AgentCheckTest, self).__init__(*args, **kwargs)
if not hasattr(self, 'CHECK_NAME'):
raise Exception("You must define CHECK_NAME")
self.check = None
def is_travis(self):
return "TRAVIS" in os.environ
def load_check(self, config, agent_config=None):
agent_config = agent_config or self.DEFAULT_AGENT_CONFIG
self.check = load_check(self.CHECK_NAME, config, agent_config)
def load_class(self, name):
"""
        Retrieve a class with the given name within the check module.
"""
return load_class(self.CHECK_NAME, name)
# Helper function when testing rates
def run_check_twice(self, config, agent_config=None, mocks=None,
force_reload=False):
self.run_check(config, agent_config, mocks, force_reload)
time.sleep(1)
self.run_check(config, agent_config, mocks)
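    # Usage sketch (hypothetical metric name): rates need two samples, so a
    # test would typically do:
    #   self.run_check_twice(config)
    #   self.assertMetric('my.check.rate', at_least=1)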
def run_check_n(self, config, agent_config=None, mocks=None,
force_reload=False, repeat=1, sleep=1):
for i in xrange(repeat):
if not i:
self.run_check(config, agent_config, mocks, force_reload)
else:
self.run_check(config, agent_config, mocks)
time.sleep(sleep)
def run_check(self, config, agent_config=None, mocks=None, force_reload=False):
# If not loaded already, do it!
if self.check is None or force_reload:
self.load_check(config, agent_config=agent_config)
if mocks is not None:
for func_name, mock in mocks.iteritems():
if not hasattr(self.check, func_name):
continue
else:
setattr(self.check, func_name, mock)
error = None
for instance in self.check.instances:
try:
                # Deepcopy needed to avoid weird duplicate tagging situations,
                # i.e. the check edits the tags of the instance, which is
                # problematic if the check runs twice.
self.check.check(copy.deepcopy(instance))
# FIXME: This should be called within the `run` method only
self.check._roll_up_instance_metadata()
except Exception, e:
# Catch error before re-raising it to be able to get service_checks
print "Exception {0} during check".format(e)
print traceback.format_exc()
error = e
self.metrics = self.check.get_metrics()
self.events = self.check.get_events()
self.service_checks = self.check.get_service_checks()
self.service_metadata = []
self.warnings = self.check.get_warnings()
# clean {} service_metadata (otherwise COVERAGE fails for nothing)
for metadata in self.check.get_service_metadata():
if metadata:
self.service_metadata.append(metadata)
if error is not None:
raise error
def print_current_state(self):
log.debug("""++++++++ CURRENT STATE ++++++++
METRICS
{metrics}
EVENTS
{events}
SERVICE CHECKS
{sc}
SERVICE METADATA
{sm}
WARNINGS
{warnings}
++++++++++++++++++++++++++++""".format(
metrics=pformat(self.metrics),
events=pformat(self.events),
sc=pformat(self.service_checks),
sm=pformat(self.service_metadata),
warnings=pformat(self.warnings)
))
def _generate_coverage_metrics(self, data, indice=None):
total = len(data)
tested = 0
untested = []
        for d in data:
            # `indice` optionally selects the element of each tuple that
            # holds the metadata dict (e.g. 3 for metric tuples).
            if (d[indice] if indice else d).get('tested'):
tested += 1
else:
untested.append(d)
if total == 0:
coverage = 100.0
else:
coverage = 100.0 * tested / total
return tested, total, coverage, untested
def coverage_report(self):
tested_metrics, total_metrics, coverage_metrics, untested_metrics = \
self._generate_coverage_metrics(self.metrics, indice=3)
tested_sc, total_sc, coverage_sc, untested_sc = \
self._generate_coverage_metrics(self.service_checks)
tested_sm, total_sm, coverage_sm, untested_sm = \
self._generate_coverage_metrics(self.service_metadata)
tested_events, total_events, coverage_events, untested_events = \
self._generate_coverage_metrics(self.events)
coverage = """Coverage
========================================
METRICS
Tested {tested_metrics}/{total_metrics} ({coverage_metrics}%)
UNTESTED: {untested_metrics}
EVENTS
Tested {tested_events}/{total_events} ({coverage_events}%)
UNTESTED: {untested_events}
SERVICE CHECKS
Tested {tested_sc}/{total_sc} ({coverage_sc}%)
UNTESTED: {untested_sc}
SERVICE METADATA
Tested {tested_sm}/{total_sm} ({coverage_sm}%)
UNTESTED: {untested_sm}
========================================"""
log.info(coverage.format(
tested_metrics=tested_metrics,
total_metrics=total_metrics,
coverage_metrics=coverage_metrics,
untested_metrics=pformat(untested_metrics),
tested_sc=tested_sc,
total_sc=total_sc,
coverage_sc=coverage_sc,
untested_sc=pformat(untested_sc),
tested_sm=tested_sm,
total_sm=total_sm,
coverage_sm=coverage_sm,
untested_sm=pformat(untested_sm),
tested_events=tested_events,
total_events=total_events,
coverage_events=coverage_events,
untested_events=pformat(untested_events),
))
if os.getenv('COVERAGE'):
self.assertEquals(coverage_metrics, 100.0)
self.assertEquals(coverage_events, 100.0)
self.assertEquals(coverage_sc, 100.0)
self.assertEquals(coverage_sm, 100.0)
def _candidates_size_assert(self, candidates, count=None, at_least=1):
try:
if count is not None:
self.assertEquals(
len(candidates), count,
"Needed exactly %d candidates, got %d" % (count, len(candidates))
)
else:
self.assertTrue(
len(candidates) >= at_least,
"Needed at least %d candidates, got %d" % (at_least, len(candidates))
)
except AssertionError:
self.print_current_state()
raise
def assertMetric(self, metric_name, value=None, tags=None, count=None,
at_least=1, hostname=None, device_name=None, metric_type=None):
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
if value is not None and val != value:
continue
if tags is not None and sorted(tags) != sorted(mdata.get("tags", [])):
continue
if hostname is not None and mdata['hostname'] != hostname:
continue
if device_name is not None and mdata['device_name'] != device_name:
continue
if metric_type is not None and mdata['type'] != metric_type:
continue
candidates.append((m_name, ts, val, mdata))
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (value: {1}, tags: {2}, "
"count: {3}, at_least: {4}, hostname: {5}) failed"
.format(metric_name, value, tags, count, at_least, hostname))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertMetricTagPrefix(self, metric_name, tag_prefix, count=None, at_least=1):
log.debug("Looking for a tag starting with `{0}:` on metric {1}"
.format(tag_prefix, metric_name))
if count is not None:
log.debug(" * should have exactly {0} data points".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} data points".format(at_least))
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
gtags = [t for t in mdata['tags'] if t.startswith(tag_prefix)]
if not gtags:
continue
candidates.append((m_name, ts, val, mdata))
try:
            self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (tag_prefix: {1}, "
"count: {2}, at_least: {3}) failed".format(metric_name,
tag_prefix,
count,
at_least))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertMetricTag(self, metric_name, tag, count=None, at_least=1):
log.debug("Looking for tag {0} on metric {1}".format(tag, metric_name))
if count is not None:
log.debug(" * should have exactly {0} data points".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} data points".format(at_least))
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
gtags = [t for t in mdata['tags'] if t == tag]
if not gtags:
continue
candidates.append((m_name, ts, val, mdata))
try:
            self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (tag: {1}, count={2},"
" at_least={3}) failed".format(metric_name, tag, count, at_least))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertServiceMetadata(self, meta_keys, count=None, at_least=1):
log.debug("Looking for service metadata with keys {0}".format(meta_keys))
if count is not None:
log.debug(" * should be defined for exactly {0} instances".format(count))
elif at_least is not None:
log.debug(" * should be defined for at least {0} instances".format(at_least))
candidates = []
for sm in self.service_metadata:
if sorted(sm.keys()) != sorted(meta_keys):
continue
candidates.append(sm)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for service metadata with keys {0}"
" (count: {1}, at_least: {2}) failed".format(meta_keys, count, at_least))
raise
for sm in self.service_metadata:
for csm in candidates:
if sm == csm:
sm['tested'] = True
log.debug("Service metadata FOUND !")
def assertServiceCheck(self, service_check_name, status=None, tags=None,
count=None, at_least=1):
log.debug("Looking for service check {0}".format(service_check_name))
if status is not None:
log.debug(" * with status {0}".format(status))
if tags is not None:
log.debug(" * tagged with {0}".format(tags))
if count is not None:
log.debug(" * should have exactly {0} statuses".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} statuses".format(at_least))
candidates = []
for sc in self.service_checks:
if sc['check'] == service_check_name:
if status is not None and sc['status'] != status:
continue
if tags is not None and sorted(tags) != sorted(sc.get("tags")):
continue
candidates.append(sc)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (status: {1}, "
"tags: {2}, count: {3}, at_least: {4}) failed".format(service_check_name,
status,
tags,
count,
at_least))
raise
for sc in self.service_checks:
for csc in candidates:
if sc == csc:
sc['tested'] = True
log.debug("{0} FOUND !".format(service_check_name))
def assertServiceCheckOK(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.OK,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckWarning(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.WARNING,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckCritical(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.CRITICAL,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckUnknown(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.UNKNOWN,
tags=tags,
count=count,
at_least=at_least)
def assertIn(self, first, second):
self.assertTrue(first in second, "{0} not in {1}".format(first, second))
def assertNotIn(self, first, second):
self.assertTrue(first not in second, "{0} in {1}".format(first, second))
def assertWarning(self, warning, count=None, at_least=1, exact_match=True):
log.debug("Looking for warning {0}".format(warning))
        if count is not None:
            log.debug(" * should have exactly {0} warnings".format(count))
        elif at_least is not None:
            log.debug(" * should have at least {0} warnings".format(at_least))
if exact_match:
candidates = [w for w in self.warnings if w == warning]
else:
candidates = [w for w in self.warnings if warning in w]
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0}, count: {1}, "
"at_least: {2}) failed".format(warning, count, at_least))
raise
log.debug("{0} FOUND !".format(warning))
# Potential kwargs: aggregation_key, alert_type, event_type,
# msg_title, source_type_name
def assertEvent(self, msg_text, count=None, at_least=1, exact_match=True,
tags=None, **kwargs):
log.debug("Looking for event {0}".format(msg_text))
if tags is not None:
log.debug(" * tagged with {0}".format(tags))
for name, value in kwargs.iteritems():
if value is not None:
log.debug(" * with {0} {1}".format(name, value))
if count is not None:
log.debug(" * should have exactly {0} events".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} events".format(count))
candidates = []
for e in self.events:
if exact_match and msg_text != e['msg_text'] or \
not exact_match and msg_text not in e['msg_text']:
continue
if tags and set(tags) != set(e['tags']):
continue
for name, value in kwargs.iteritems():
if e[name] != value:
break
else:
candidates.append(e)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0}, count: {1}, "
"at_least: {2}) failed".format(msg_text, count, at_least))
raise
for ev, ec in product(self.events, candidates):
if ec == ev:
ev['tested'] = True
log.debug("{0} FOUND !".format(msg_text))
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import logging
import re
from devil.android import device_errors
logger = logging.getLogger(__name__)
_atexit_messages = set()
# Defines how to switch between the default performance configuration
# ('default_mode') and the mode for use when benchmarking ('high_perf_mode').
# For devices not in the list the defaults are to set up the scaling governor to
# 'performance' and reset it back to 'ondemand' when benchmarking is finished.
#
# The 'default_mode_governor' must be defined, while a
# 'high_perf_mode_governor' is not taken into account, because the governor
# 'performance' is currently used for all benchmarking on all devices.
#
# TODO(crbug.com/383566): Add definitions for all devices used in the perf
# waterfall.
_PERFORMANCE_MODE_DEFINITIONS = {
# Fire TV Edition - 4K
'AFTKMST12': {
'default_mode_governor': 'interactive',
},
'Pixel 5': {
'big_cores': ['cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'Pixel 4a': {
'big_cores': ['cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'Pixel 4': {
'big_cores': ['cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
# Pixel 3
'blueline': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
# The SoC is Arm big.LITTLE. The cores 0..3 are LITTLE,
# the 4..7 are big.
'cpu_max_freq': {
'0..3': 1228800,
'4..7': 1536000
},
'gpu_max_freq': 520000000,
},
'default_mode': {
'cpu_max_freq': {
'0..3': 1766400,
'4..7': 2649600
},
'gpu_max_freq': 710000000,
},
'big_cores': ['cpu4', 'cpu5', 'cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'Pixel 2': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
# These are set to roughly 7/8 of the max frequency. The purpose of
# this is to ensure that thermal throttling doesn't kick in midway
# through a test and cause flaky results. It should also improve the
# longevity of the devices by keeping them cooler.
'cpu_max_freq': {
'0..3': 1670400,
'4..7': 2208000,
},
'gpu_max_freq': 670000000,
},
'default_mode': {
# These are the maximum frequencies available for these CPUs and
# GPUs.
'cpu_max_freq': {
'0..3': 1900800,
'4..7': 2457600,
},
'gpu_max_freq': 710000000,
},
'big_cores': ['cpu4', 'cpu5', 'cpu6', 'cpu7'],
'default_mode_governor': 'schedutil',
},
'GT-I9300': {
'default_mode_governor': 'pegasusq',
},
'Galaxy Nexus': {
'default_mode_governor': 'interactive',
},
# Pixel
'msm8996': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
'cpu_max_freq': 1209600,
'gpu_max_freq': 315000000,
},
'default_mode': {
# The SoC is Arm big.LITTLE. The cores 0..1 are LITTLE,
# the 2..3 are big.
'cpu_max_freq': {
'0..1': 1593600,
'2..3': 2150400
},
'gpu_max_freq': 624000000,
},
'big_cores': ['cpu2', 'cpu3'],
'default_mode_governor': 'sched',
},
'Nexus 7': {
'default_mode_governor': 'interactive',
},
'Nexus 10': {
'default_mode_governor': 'interactive',
},
'Nexus 4': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
},
'default_mode_governor': 'ondemand',
},
'Nexus 5': {
# The list of possible GPU frequency values can be found in:
# /sys/class/kgsl/kgsl-3d0/gpu_available_frequencies.
# For CPU cores the possible frequency values are at:
# /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
'high_perf_mode': {
'bring_cpu_cores_online': True,
'cpu_max_freq': 1190400,
'gpu_max_freq': 200000000,
},
'default_mode': {
'cpu_max_freq': 2265600,
'gpu_max_freq': 450000000,
},
'default_mode_governor': 'ondemand',
},
'Nexus 5X': {
'high_perf_mode': {
'bring_cpu_cores_online': True,
'cpu_max_freq': 1248000,
'gpu_max_freq': 300000000,
},
'default_mode': {
'governor': 'ondemand',
# The SoC is ARM big.LITTLE. The cores 4..5 are big,
# the 0..3 are LITTLE.
'cpu_max_freq': {
'0..3': 1440000,
'4..5': 1824000
},
'gpu_max_freq': 600000000,
},
'big_cores': ['cpu4', 'cpu5'],
'default_mode_governor': 'ondemand',
},
}
def _GetPerfModeDefinitions(product_model):
if product_model.startswith('AOSP on '):
product_model = product_model.replace('AOSP on ', '')
return _PERFORMANCE_MODE_DEFINITIONS.get(product_model)
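# Example: _GetPerfModeDefinitions('AOSP on blueline') strips the 'AOSP on '
# prefix and returns the 'blueline' (Pixel 3) entry above; unknown models
# return None, and callers fall back to plain scaling-governor handling.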
def _NoisyWarning(message):
message += ' Results may be NOISY!!'
logger.warning(message)
# Add an additional warning at exit, such that it's clear that any results
# may be different/noisy (due to the lack of intended performance mode).
if message not in _atexit_messages:
_atexit_messages.add(message)
atexit.register(logger.warning, message)
class PerfControl(object):
"""Provides methods for setting the performance mode of a device."""
_AVAILABLE_GOVERNORS_REL_PATH = 'cpufreq/scaling_available_governors'
_CPU_FILE_PATTERN = re.compile(r'^cpu\d+$')
_CPU_PATH = '/sys/devices/system/cpu'
_KERNEL_MAX = '/sys/devices/system/cpu/kernel_max'
def __init__(self, device):
self._device = device
self._cpu_files = []
for file_name in self._device.ListDirectory(self._CPU_PATH, as_root=True):
if self._CPU_FILE_PATTERN.match(file_name):
self._cpu_files.append(file_name)
assert self._cpu_files, 'Failed to detect CPUs.'
self._cpu_file_list = ' '.join(self._cpu_files)
logger.info('CPUs found: %s', self._cpu_file_list)
self._have_mpdecision = self._device.FileExists('/system/bin/mpdecision')
raw = self._ReadEachCpuFile(self._AVAILABLE_GOVERNORS_REL_PATH)
self._available_governors = [
(cpu, raw_governors.strip().split() if not exit_code else None)
for cpu, raw_governors, exit_code in raw
]
def _SetMaxFrequenciesFromMode(self, mode):
"""Set maximum frequencies for GPU and CPU cores.
Args:
mode: A dictionary mapping optional keys 'cpu_max_freq' and 'gpu_max_freq'
to integer values of frequency supported by the device.
"""
cpu_max_freq = mode.get('cpu_max_freq')
if cpu_max_freq:
if not isinstance(cpu_max_freq, dict):
self._SetScalingMaxFreqForCpus(cpu_max_freq, self._cpu_file_list)
else:
for key, max_frequency in cpu_max_freq.items():
# Convert 'X' to 'cpuX' and 'X..Y' to 'cpuX cpu<X+1> .. cpuY'.
if '..' in key:
range_min, range_max = key.split('..')
range_min, range_max = int(range_min), int(range_max)
else:
range_min = range_max = int(key)
cpu_files = [
'cpu%d' % number for number in range(range_min, range_max + 1)
]
# Set the |max_frequency| on requested subset of the cores.
self._SetScalingMaxFreqForCpus(max_frequency, ' '.join(cpu_files))
gpu_max_freq = mode.get('gpu_max_freq')
if gpu_max_freq:
self._SetMaxGpuClock(gpu_max_freq)
def SetHighPerfMode(self):
"""Sets the highest stable performance mode for the device."""
try:
self._device.EnableRoot()
except device_errors.CommandFailedError:
_NoisyWarning('Need root for performance mode.')
return
mode_definitions = _GetPerfModeDefinitions(self._device.product_model)
if not mode_definitions:
self.SetScalingGovernor('performance')
return
high_perf_mode = mode_definitions.get('high_perf_mode')
if not high_perf_mode:
self.SetScalingGovernor('performance')
return
if high_perf_mode.get('bring_cpu_cores_online', False):
self._ForceAllCpusOnline(True)
if not self._AllCpusAreOnline():
_NoisyWarning('Failed to force CPUs online.')
# Scaling governor must be set _after_ bringing all CPU cores online,
# otherwise it would not affect the cores that are currently offline.
self.SetScalingGovernor('performance')
self._SetMaxFrequenciesFromMode(high_perf_mode)
def SetLittleOnlyMode(self):
"""Turns off big CPU cores on the device."""
try:
self._device.EnableRoot()
except device_errors.CommandFailedError:
_NoisyWarning('Need root to turn off cores.')
return
mode_definitions = _GetPerfModeDefinitions(self._device.product_model)
if not mode_definitions:
_NoisyWarning('Unknown device: %s. Can\'t turn off cores.'
% self._device.product_model)
return
big_cores = mode_definitions.get('big_cores', [])
if not big_cores:
      _NoisyWarning('No big core definition for device: %s.' %
                    self._device.product_model)
return
self._ForceCpusOffline(cpu_list=' '.join(big_cores))
def SetDefaultPerfMode(self):
"""Sets the performance mode for the device to its default mode."""
if not self._device.HasRoot():
return
mode_definitions = _GetPerfModeDefinitions(self._device.product_model)
if not mode_definitions:
self.SetScalingGovernor('ondemand')
else:
default_mode_governor = mode_definitions.get('default_mode_governor')
assert default_mode_governor, ('Default mode governor must be provided '
'for all perf mode definitions.')
self.SetScalingGovernor(default_mode_governor)
default_mode = mode_definitions.get('default_mode')
if default_mode:
self._SetMaxFrequenciesFromMode(default_mode)
self._ForceAllCpusOnline(False)
def SetPerfProfilingMode(self):
"""Enables all cores for reliable perf profiling."""
self._ForceAllCpusOnline(True)
self.SetScalingGovernor('performance')
if not self._AllCpusAreOnline():
if not self._device.HasRoot():
raise RuntimeError('Need root to force CPUs online.')
raise RuntimeError('Failed to force CPUs online.')
def GetCpuInfo(self):
online = (output.rstrip() == '1' and status == 0
for (_, output, status) in self._ForEachCpu('cat "$CPU/online"'))
governor = (
output.rstrip() if status == 0 else None
for (_, output,
status) in self._ForEachCpu('cat "$CPU/cpufreq/scaling_governor"'))
return list(zip(self._cpu_files, online, governor))
def _ForEachCpu(self, cmd, cpu_list=None):
"""Runs a command on the device for each of the CPUs.
Args:
      cmd: A string with a shell command; it may use the shell expansion
          "$CPU" to refer to the current CPU in string form (e.g. "cpu0",
          "cpu1", and so on).
      cpu_list: A space-separated string of CPU core names, as in the example
          above.
    Returns:
      A list of tuples in the form (cpu_string, command_output, exit_code),
      one tuple per command invocation; all lines of each command's output
      are joined into a single string.
"""
if cpu_list is None:
cpu_list = self._cpu_file_list
script = '; '.join([
'for CPU in %s' % cpu_list,
'do %s' % cmd, 'echo -n "%~%$?%~%"', 'done'
])
output = self._device.RunShellCommand(
script, cwd=self._CPU_PATH, check_return=True, as_root=True, shell=True)
output = '\n'.join(output).split('%~%')
return zip(self._cpu_files, output[0::2], (int(c) for c in output[1::2]))
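  # Illustrative run of the "%~%" protocol above (hypothetical device output):
  # for cpu_list='cpu0 cpu1' and cmd='cat "$CPU/online"', the joined output is
  # '1\n%~%0%~%1\n%~%0%~%', which splits into ['1\n', '0', '1\n', '0', ''];
  # outputs sit at [0::2], exit codes at [1::2], and zip() with the two cpu
  # names drops the trailing empty string.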
def _ConditionallyWriteCpuFiles(self, path, value, cpu_files, condition):
template = (
'{condition} && test -e "$CPU/{path}" && echo {value} > "$CPU/{path}"')
results = self._ForEachCpu(
template.format(path=path, value=value, condition=condition), cpu_files)
cpus = ' '.join(cpu for (cpu, _, status) in results if status == 0)
if cpus:
logger.info('Successfully set %s to %r on: %s', path, value, cpus)
else:
logger.warning('Failed to set %s to %r on any cpus', path, value)
def _WriteCpuFiles(self, path, value, cpu_files):
self._ConditionallyWriteCpuFiles(path, value, cpu_files, condition='true')
def _ReadEachCpuFile(self, path):
return self._ForEachCpu('cat "$CPU/{path}"'.format(path=path))
def SetScalingGovernor(self, value):
"""Sets the scaling governor to the given value on all possible CPUs.
This does not attempt to set a governor to a value not reported as available
on the corresponding CPU.
Args:
value: [string] The new governor value.
"""
condition = 'test -e "{path}" && grep -q {value} {path}'.format(
path=('${CPU}/%s' % self._AVAILABLE_GOVERNORS_REL_PATH), value=value)
self._ConditionallyWriteCpuFiles('cpufreq/scaling_governor', value,
self._cpu_file_list, condition)
def GetScalingGovernor(self):
"""Gets the currently set governor for each CPU.
Returns:
An iterable of 2-tuples, each containing the cpu and the current
governor.
"""
raw = self._ReadEachCpuFile('cpufreq/scaling_governor')
return [(cpu, raw_governor.strip() if not exit_code else None)
for cpu, raw_governor, exit_code in raw]
def ListAvailableGovernors(self):
"""Returns the list of available governors for each CPU.
Returns:
An iterable of 2-tuples, each containing the cpu and a list of available
governors for that cpu.
"""
return self._available_governors
def _SetScalingMaxFreqForCpus(self, value, cpu_files):
self._WriteCpuFiles('cpufreq/scaling_max_freq', '%d' % value, cpu_files)
def _SetMaxGpuClock(self, value):
self._device.WriteFile(
'/sys/class/kgsl/kgsl-3d0/max_gpuclk', str(value), as_root=True)
def _AllCpusAreOnline(self):
results = self._ForEachCpu('cat "$CPU/online"')
# The file 'cpu0/online' is missing on some devices (example: Nexus 9). This
# is likely because on these devices it is impossible to bring the cpu0
# offline. Assuming the same for all devices until proven otherwise.
return all(output.rstrip() == '1' and status == 0
for (cpu, output, status) in results if cpu != 'cpu0')
def _ForceAllCpusOnline(self, force_online):
"""Enable all CPUs on a device.
Some vendors (or only Qualcomm?) hot-plug their CPUs, which can add noise
to measurements:
- In perf, samples are only taken for the CPUs that are online when the
measurement is started.
- The scaling governor can't be set for an offline CPU and frequency scaling
on newly enabled CPUs adds noise to both perf and tracing measurements.
It appears Qualcomm is the only vendor that hot-plugs CPUs, and on Qualcomm
this is done by "mpdecision".
"""
if self._have_mpdecision:
cmd = ['stop', 'mpdecision'] if force_online else ['start', 'mpdecision']
self._device.RunShellCommand(cmd, check_return=True, as_root=True)
if not self._have_mpdecision and not self._AllCpusAreOnline():
logger.warning('Unexpected cpu hot plugging detected.')
if force_online:
self._ForEachCpu('echo 1 > "$CPU/online"')
def _ForceCpusOffline(self, cpu_list):
"""Disable selected CPUs on a device."""
if self._have_mpdecision:
cmd = ['stop', 'mpdecision']
self._device.RunShellCommand(cmd, check_return=True, as_root=True)
self._ForEachCpu('echo 0 > "$CPU/online"', cpu_list=cpu_list)
|
|
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2015-2019 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function
import argparse as ap
import importlib
import io
import math
import os
import re
import sys
import zipfile
from builtins import str
from collections import OrderedDict
from copy import copy
from pprint import pprint
from affine import Affine
from past.utils import old_div
from .common import *
from .pckg_info import __version__
from .py_2_3 import *
__all__ = ["kipart"] # Only export this routine for use by the outside world.
THIS_MODULE = sys.modules[__name__] # Ref to this module for making named calls.
# Settings for creating the KiCad schematic part symbol.
# Dimensions are given in mils (0.001").
# Origin point.
XO = 0
YO = 0
# Pin settings.
PIN_LENGTH = 200
PIN_SPACING = 100
PIN_NUM_SIZE = 50 # Font size for pin numbers.
PIN_NAME_SIZE = 50 # Font size for pin names.
PIN_NAME_OFFSET = 40 # Separation between pin and pin name.
PIN_ORIENTATION = "left"
PIN_STYLE = "line"
SHOW_PIN_NUMBER = True # Show pin numbers when True.
SHOW_PIN_NAME = True # Show pin names when True.
SINGLE_PIN_SUFFIX = ""
MULTI_PIN_SUFFIX = "*"
PIN_SPACER_PREFIX = "*"
# Settings for box drawn around pins in a unit.
DEFAULT_BOX_LINE_WIDTH = 0
# Mapping from understandable schematic symbol box fill-type name
# to the fill-type indicator used in the KiCad part library.
BOX_FILLS = {"no_fill": "N", "fg_fill": "F", "bg_fill": "f"}
DEFAULT_BOX_FILL = "bg_fill"
# Part reference.
REF_SIZE = 60 # Font size.
REF_Y_OFFSET = 250
# Part number.
PART_NUM_SIZE = 60 # Font size.
PART_NUM_Y_OFFSET = 150
# Part footprint
PART_FOOTPRINT_SIZE = 60 # Font size.
PART_FOOTPRINT_Y_OFFSET = 50
# Part manufacturer number.
PART_MPN_SIZE = 60 # Font size.
PART_MPN_Y_OFFSET = -50
# Part datasheet.
PART_DATASHEET_SIZE = 60 # Font size.
PART_DATASHEET_Y_OFFSET = -150
# Part description.
PART_DESC_SIZE = 60 # Font size.
PART_DESC_Y_OFFSET = -250
# Mapping from understandable pin orientation name to the orientation
# indicator used in the KiCad part library. This mapping looks backward,
# but if pins are placed on the left side of the symbol, you actually
# want to use the pin symbol where the line points to the right.
# The same goes for the other sides.
PIN_ORIENTATIONS = {
"": "R",
"left": "R",
"right": "L",
"bottom": "U",
"down": "U",
"top": "D",
"up": "D",
}
scrubber = re.compile("[^\w~#]+")
PIN_ORIENTATIONS = {
scrubber.sub("", k).lower(): v for k, v in list(PIN_ORIENTATIONS.items())
}
ROTATION = {"left": 0, "right": 180, "bottom": 90, "top": -90}
# Mapping from understandable pin type name to the type
# indicator used in the KiCad part library.
PIN_TYPES = {
"input": "I",
"inp": "I",
"in": "I",
"clk": "I",
"output": "O",
"outp": "O",
"out": "O",
"bidirectional": "B",
"bidir": "B",
"bi": "B",
"inout": "B",
"io": "B",
"iop": "B",
"tristate": "T",
"tri": "T",
"passive": "P",
"pass": "P",
"unspecified": "U",
"un": "U",
"": "U",
"analog": "U",
"power_in": "W",
"pwr_in": "W",
"pwrin": "W",
"power": "W",
"pwr": "W",
"ground": "W",
"gnd": "W",
"power_out": "w",
"pwr_out": "w",
"pwrout": "w",
"pwr_o": "w",
"open_collector": "C",
"opencollector": "C",
"open_coll": "C",
"opencoll": "C",
"oc": "C",
"open_emitter": "E",
"openemitter": "E",
"open_emit": "E",
"openemit": "E",
"oe": "E",
"no_connect": "N",
"noconnect": "N",
"no_conn": "N",
"noconn": "N",
"nc": "N",
}
PIN_TYPES = {scrubber.sub("", k).lower(): v for k, v in list(PIN_TYPES.items())}
# Mapping from understandable pin drawing style to the style
# indicator used in the KiCad part library.
PIN_STYLES = {
"line": "",
"": "",
"inverted": "I",
"inv": "I",
"~": "I",
"#": "I",
"clock": "C",
"clk": "C",
"rising_clk": "C",
"inverted_clock": "IC",
"inv_clk": "IC",
"clk_b": "IC",
"clk_n": "IC",
"~clk": "IC",
"#clk": "IC",
"input_low": "L",
"inp_low": "L",
"in_lw": "L",
"in_b": "L",
"in_n": "L",
"~in": "L",
"#in": "L",
"clock_low": "CL",
"clk_low": "CL",
"clk_lw": "CL",
"output_low": "V",
"outp_low": "V",
"out_lw": "V",
"out_b": "V",
"out_n": "V",
"~out": "V",
"#out": "V",
"falling_edge_clock": "F",
"falling_clk": "F",
"fall_clk": "F",
"non_logic": "X",
"nl": "X",
"analog": "X",
}
PIN_STYLES = {scrubber.sub("", k).lower(): v for k, v in list(PIN_STYLES.items())}
# Format strings for various items in a KiCad part library.
LIB_HEADER = "EESchema-LIBRARY Version 2.3\n"
START_DEF = "DEF {name} {ref} 0 {pin_name_offset} {show_pin_number} {show_pin_name} {num_units} L N\n"
END_DEF = "ENDDEF\n"
REF_FIELD = 'F0 "{ref_prefix}" {x} {y} {font_size} H V {text_justification} CNN\n'
PARTNUM_FIELD = 'F1 "{num}" {x} {y} {font_size} H V {text_justification} CNN\n'
FOOTPRINT_FIELD = 'F2 "{footprint}" {x} {y} {font_size} H I {text_justification} CNN\n'
DATASHEET_FIELD = 'F3 "{datasheet}" {x} {y} {font_size} H I {text_justification} CNN\n'
MPN_FIELD = 'F4 "{manf_num}" {x} {y} {font_size} H I {text_justification} CNN "manf#"\n'
DESC_FIELD = 'F5 "{desc}" {x} {y} {font_size} H I {text_justification} CNN "desc"\n'
START_DRAW = "DRAW\n"
END_DRAW = "ENDDRAW\n"
BOX = "S {x0} {y0} {x1} {y1} {unit_num} 1 {line_width} {fill}\n"
PIN = "X {name} {num} {x} {y} {length} {orientation} {num_sz} {name_sz} {unit_num} 1 {pin_type} {pin_style}\n"
def annotate_pins(unit_pins):
"""Annotate pin names to indicate special information."""
for name, pins in unit_pins:
# If there are multiple pins with the same name in a unit, then append a
# distinctive suffix to the pin name to indicate multiple pins are placed
# at a single location on the unit. (This is done so multiple pins that
# should be on the same net (e.g. GND) can be connected using a single
# net connection in the schematic.)
name_suffix = SINGLE_PIN_SUFFIX
if len(pins) > 1:
# name_suffix = MULTI_PIN_SUFFIX
name_suffix = "[{}]".format(len(pins))
for pin in pins:
pin.name += name_suffix
def get_pin_num_and_spacer(pin):
pin_num = str(pin.num)
pin_spacer = 0
# spacer pins have pin numbers starting with a special prefix char.
if pin_num.startswith(PIN_SPACER_PREFIX):
pin_spacer = 1
pin_num = pin_num[1:] # Remove the spacer prefix.
return pin_num, pin_spacer
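# Example: a pin numbered '*12' yields ('12', 1), adding one extra vertical
# slot before the pin, while a plain '12' yields ('12', 0).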
def count_pin_slots(unit_pins):
"""Count the number of vertical pin slots needed for a column of pins."""
# Compute the # of slots for the column of pins, taking spacers into account.
num_slots = 0
pin_num_len = 0
for name, pins in unit_pins:
pin_spacer = 0
pin_num_len = 0
for pin in pins:
pin_num, pin_spacer = get_pin_num_and_spacer(pin)
pin_num_len = max(pin_num_len, len(pin_num))
num_slots += pin_spacer # Add a slot if there was a spacer.
# Add a slot if the pin number was more than just a spacer prefix.
if pin_num_len > 0:
num_slots += 1
return num_slots
def pins_bbox(unit_pins):
"""Return the bounding box of a column of pins and their names."""
if len(unit_pins) == 0:
return [[XO, YO], [XO, YO]] # No pins, so no bounding box.
width = 0
for name, pins in unit_pins:
# Update the maximum observed width of a pin name. This is used later to
# size the width of the box surrounding the pin names for this unit.
width = max(width, len(pins[0].name) * PIN_NAME_SIZE)
# Add the separation space before and after the pin name.
width += PIN_LENGTH + 2 * PIN_NAME_OFFSET
# Make bounding box an integer number of pin spaces so pin connections are always on the grid.
width = math.ceil(old_div(float(width), PIN_SPACING)) * PIN_SPACING
# Compute the height of the column of pins.
height = count_pin_slots(unit_pins) * PIN_SPACING
return [[XO, YO + PIN_SPACING], [XO + width, YO - height]]
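# Worked example for pins_bbox() (hypothetical unit): with a longest pin name
# of 8 characters, width = 8*50 + 200 + 2*40 = 680 mils, rounded up to 700
# (a multiple of PIN_SPACING) so pin endpoints stay on the 100-mil grid.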
def balance_bboxes(bboxes):
"""Make the symbol more balanced by adjusting the bounding boxes of the pins on each side."""
X = 0
Y = 1
def find_bbox_bbox(*bboxes):
"""Find the bounding box for a set of bounding boxes."""
bb = [[0, 0], [0, 0]]
for bbox in bboxes:
bb[0][X] = min(bb[0][X], bbox[0][X])
bb[1][X] = max(bb[1][X], bbox[1][X])
bb[0][Y] = max(bb[0][Y], bbox[0][Y])
bb[1][Y] = min(bb[1][Y], bbox[1][Y])
return bb
# Determine the number of sides of the symbol with pins.
num_sides = len(bboxes)
if num_sides == 4:
# If the symbol has pins on all four sides, then check to see if there
# are approximately the same number of pins on all four sides. If so,
# then equalize the bounding box for each side. Otherwise, equalize
# the left & right bounding boxes and the top & bottom bounding boxes.
lr_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"])
lr_hgt = abs(lr_bbox[0][Y] - lr_bbox[1][Y])
tb_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"])
tb_hgt = abs(tb_bbox[0][Y] - tb_bbox[1][Y])
if 0.75 <= float(lr_hgt) / float(tb_hgt) <= 1 / 0.75:
bal_bbox = find_bbox_bbox(*list(bboxes.values()))
for side in bboxes:
bboxes[side] = copy(bal_bbox)
else:
bboxes["left"] = copy(lr_bbox)
bboxes["right"] = copy(lr_bbox)
bboxes["top"] = copy(tb_bbox)
bboxes["bottom"] = copy(tb_bbox)
elif num_sides == 3:
        # If the symbol only has pins on three sides, then equalize the
# bounding boxes for the pins on opposite sides and leave the
# bounding box on the other side unchanged.
if "left" not in bboxes or "right" not in bboxes:
# Top & bottom side pins, but the left or right side is empty.
bal_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"])
bboxes["top"] = copy(bal_bbox)
bboxes["bottom"] = copy(bal_bbox)
elif "top" not in bboxes or "bottom" not in bboxes:
# Left & right side pins, but the top or bottom side is empty.
bal_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"])
bboxes["left"] = copy(bal_bbox)
bboxes["right"] = copy(bal_bbox)
elif num_sides == 2:
# If the symbol only has pins on two opposing sides, then equalize the
# height of the bounding boxes for each side. Leave the width unchanged.
if "left" in bboxes and "right" in bboxes:
bal_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"])
bboxes["left"][0][Y] = bal_bbox[0][Y]
bboxes["left"][1][Y] = bal_bbox[1][Y]
bboxes["right"][0][Y] = bal_bbox[0][Y]
bboxes["right"][1][Y] = bal_bbox[1][Y]
elif "top" in bboxes and "bottom" in bboxes:
bal_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"])
bboxes["top"][0][Y] = bal_bbox[0][Y]
bboxes["top"][1][Y] = bal_bbox[1][Y]
bboxes["bottom"][0][Y] = bal_bbox[0][Y]
bboxes["bottom"][1][Y] = bal_bbox[1][Y]
def draw_pins(unit_num, unit_pins, bbox, transform, side, push, fuzzy_match):
"""Draw a column of pins rotated/translated by the transform matrix."""
# String to add pin definitions to.
pin_defn = ""
# Find the actual height of the column of pins and subtract it from the
# bounding box (which should be at least as large). Half the difference
# will be the offset needed to center the pins on the side of the symbol.
Y = 1 # Index for Y coordinate.
pins_bb = pins_bbox(unit_pins)
height_offset = abs(bbox[0][Y] - bbox[1][Y]) - abs(pins_bb[0][Y] - pins_bb[1][Y])
push = min(max(0.0, push), 1.0)
if side in ("right", "top"):
push = 1.0 - push
height_offset *= push
height_offset -= height_offset % PIN_SPACING # Keep stuff on the PIN_SPACING grid.
# Start drawing pins from the origin.
x = XO
y = YO - height_offset
for name, pins in unit_pins:
# Detect pins with "spacer" pin numbers.
pin_spacer = 0
pin_num_len = 0
for pin in pins:
pin_num, pin_spacer = get_pin_num_and_spacer(pin)
pin_num_len = max(pin_num_len, len(pin_num))
y -= pin_spacer * PIN_SPACING # Add space between pins if there was a spacer.
if pin_num_len == 0:
continue # Omit pin if it only had a spacer prefix and no actual pin number.
# Rotate/translate the current drawing point.
(draw_x, draw_y) = transform * (x, y)
# Use approximate matching to determine the pin's type, style and orientation.
pin_type = find_closest_match(pins[0].type, PIN_TYPES, fuzzy_match)
pin_style = find_closest_match(pins[0].style, PIN_STYLES, fuzzy_match)
pin_side = find_closest_match(pins[0].side, PIN_ORIENTATIONS, fuzzy_match)
if pins[0].hidden.lower().strip() in ["y", "yes", "t", "true", "1"]:
pin_style = "N" + pin_style
        # Create all the pins with a particular name. If there is more than one,
        # they are laid on top of each other and only the first is visible.
num_size = PIN_NUM_SIZE # First pin will be visible.
for pin in pins:
pin_num = str(pin.num)
# Remove any spacer prefix on the pin numbers.
if pin_num.startswith(PIN_SPACER_PREFIX):
pin_num = pin_num[1:]
# Create a pin using the pin data.
pin_defn += PIN.format(
name=pin.name,
num=pin_num,
x=int(draw_x),
y=int(draw_y),
length=PIN_LENGTH,
orientation=pin_side,
num_sz=num_size,
name_sz=PIN_NAME_SIZE,
unit_num=unit_num,
pin_type=pin_type,
pin_style=pin_style,
)
# Turn off visibility after the first pin.
num_size = 0
# Move to the next pin placement location on this unit.
y -= PIN_SPACING
return pin_defn # Return part symbol definition with pins added.
def zero_pad_nums(s):
    # Pad all numbers in the string with leading 0's (to eight digits).
    # Thus, 'A10' and 'A2' become 'A00000010' and 'A00000002', so A2
    # appears before A10 in a sorted list.
try:
return re.sub(
r"\d+", lambda mtch: "0" * (8 - len(mtch.group(0))) + mtch.group(0), s
)
except TypeError:
return s # The input is probably not a string, so just return it unchanged.
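# Example: sorted(['A10', 'A2'], key=zero_pad_nums) == ['A2', 'A10'], since
# the padded forms 'A00000010' and 'A00000002' compare in numeric order.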
def num_key(pin):
"""Generate a key from a pin's number so they are sorted by position on the package."""
    # Pad all numeric strings in the pin number with leading 0's.
    # Thus, 'A10' and 'A2' become 'A00000010' and 'A00000002', so A2
    # appears before A10 in a sorted list.
return zero_pad_nums(pin[1][0].num)
def name_key(pin):
"""Generate a key from a pin's name so they are sorted more logically."""
    # Pad all numeric strings in the pin name with leading 0's.
    # Thus, 'adc10' and 'adc2' become 'adc00000010' and 'adc00000002', so
    # adc2 appears before adc10 in a sorted list.
return zero_pad_nums(pin[1][0].name)
def row_key(pin):
"""Generate a key from the order the pins were entered into the CSV file."""
return pin[1][0].index
def draw_symbol(
part_num,
part_ref_prefix,
part_footprint,
part_manf_num,
part_datasheet,
part_desc,
pin_data,
sort_type,
reverse,
fuzzy_match,
fill,
box_line_width,
push,
):
"""Add a symbol for a part to the library."""
# Start the part definition with the header.
part_defn = START_DEF.format(
name=part_num,
ref=part_ref_prefix,
pin_name_offset=PIN_NAME_OFFSET,
show_pin_number=SHOW_PIN_NUMBER and "Y" or "N",
show_pin_name=SHOW_PIN_NAME and "Y" or "N",
num_units=len(pin_data),
)
# Determine if there are pins across the top of the symbol.
# If so, right-justify the reference, part number, etc. so they don't
# run into the top pins. If not, stick with left-justification.
text_justification = "L"
horiz_offset = PIN_LENGTH
for unit in list(pin_data.values()):
if "top" in list(unit.keys()):
text_justification = "R"
horiz_offset = PIN_LENGTH - 50
break
# Create the field that stores the part reference.
if not part_ref_prefix:
part_ref_prefix = "U"
part_defn += REF_FIELD.format(
ref_prefix=part_ref_prefix,
x=XO + horiz_offset,
y=YO + REF_Y_OFFSET,
text_justification=text_justification,
font_size=REF_SIZE,
)
# Create the field that stores the part number.
if not part_num:
part_num = ""
part_defn += PARTNUM_FIELD.format(
num=part_num,
x=XO + horiz_offset,
y=YO + PART_NUM_Y_OFFSET,
text_justification=text_justification,
font_size=PART_NUM_SIZE,
)
# Create the field that stores the part footprint.
if not part_footprint:
part_footprint = ""
part_defn += FOOTPRINT_FIELD.format(
footprint=part_footprint,
x=XO + horiz_offset,
y=YO + PART_FOOTPRINT_Y_OFFSET,
text_justification=text_justification,
font_size=PART_FOOTPRINT_SIZE,
)
# Create the field that stores the datasheet link.
if not part_datasheet:
part_datasheet = ""
part_defn += DATASHEET_FIELD.format(
datasheet=part_datasheet,
x=XO + horiz_offset,
y=YO + PART_DATASHEET_Y_OFFSET,
text_justification=text_justification,
font_size=PART_DATASHEET_SIZE,
)
# Create the field that stores the manufacturer part number.
if part_manf_num:
part_defn += MPN_FIELD.format(
manf_num=part_manf_num,
x=XO + horiz_offset,
y=YO + PART_MPN_Y_OFFSET,
text_justification=text_justification,
font_size=PART_MPN_SIZE,
)
    # Create the field that stores the part description.
if part_desc:
part_defn += DESC_FIELD.format(
desc=part_desc,
x=XO + horiz_offset,
y=YO + PART_DESC_Y_OFFSET,
text_justification=text_justification,
font_size=PART_DESC_SIZE,
)
# Start the section of the part definition that holds the part's units.
part_defn += START_DRAW
# Get a reference to the sort-key generation function for pins.
pin_key_func = getattr(THIS_MODULE, "{}_key".format(sort_type))
# This is the sort-key generation function for unit names.
unit_key_func = lambda x: zero_pad_nums(x[0])
# Now create the units that make up the part. Unit numbers go from 1
# up to the number of units in the part. The units are sorted by their
# names before assigning unit numbers.
for unit_num, unit in enumerate(
[p[1] for p in sorted(pin_data.items(), key=unit_key_func)], 1
):
# The indices of the X and Y coordinates in a list of point coords.
X = 0
Y = 1
# Initialize data structures that store info for each side of a schematic symbol unit.
all_sides = ["left", "right", "top", "bottom"]
bbox = {side: [(XO, YO), (XO, YO)] for side in all_sides}
box_pt = {side: [XO + PIN_LENGTH, YO + PIN_SPACING] for side in all_sides}
anchor_pt = {side: [XO + PIN_LENGTH, YO + PIN_SPACING] for side in all_sides}
transform = {}
# Annotate the pins for each side of the symbol.
for side_pins in list(unit.values()):
annotate_pins(list(side_pins.items()))
# Determine the actual bounding box for each side.
bbox = {}
for side, side_pins in list(unit.items()):
bbox[side] = pins_bbox(list(side_pins.items()))
# Adjust the sizes of the bboxes to make the unit look more symmetrical.
balance_bboxes(bbox)
# Determine some important points for each side of pins.
for side in unit:
#
# C B-------A
# | |
# ------| name1 |
# | |
# ------| name2 |
#
# A = anchor point = upper-right corner of bounding box.
# B = box point = upper-left corner of bounding box + pin length.
# C = upper-left corner of bounding box.
anchor_pt[side] = [
max(bbox[side][0][X], bbox[side][1][X]),
max(bbox[side][0][Y], bbox[side][1][Y]),
]
box_pt[side] = [
min(bbox[side][0][X], bbox[side][1][X]) + PIN_LENGTH,
max(bbox[side][0][Y], bbox[side][1][Y]),
]
# AL = left-side anchor point.
# AB = bottom-side anchor point.
# AR = right-side anchor point.
# AT = top-side anchor-point.
# +-------------+
# | |
# | TOP |
# | |
# +------AL------------AT
# | |
# | | +---------+
# | | | |
# | L | | |
# | E | | R |
# | F | | I |
# | T | | G |
# | | | H |
# | | | T |
# | | | |
# +------AB-------+ AR--------+
# | BOTTOM |
# +--------+
#
# Create zero-sized bounding boxes for any sides of the unit without pins.
# This makes it simpler to do the width/height calculation that follows.
for side in all_sides:
if side not in bbox:
bbox[side] = [(XO, YO), (XO, YO)]
# This is the width and height of the box in the middle of the pins on each side.
box_width = max(
abs(bbox["top"][0][Y] - bbox["top"][1][Y]),
abs(bbox["bottom"][0][Y] - bbox["bottom"][1][Y]),
)
box_height = max(
abs(bbox["left"][0][Y] - bbox["left"][1][Y]),
abs(bbox["right"][0][Y] - bbox["right"][1][Y]),
)
for side in all_sides:
# Each side of pins starts off with the orientation of a left-hand side of pins.
# Transformation matrix starts by rotating the side of pins.
transform[side] = Affine.rotation(ROTATION[side])
# Now rotate the anchor point to see where it goes.
rot_anchor_pt = transform[side] * anchor_pt[side]
# Translate the rotated anchor point to coincide with the AL anchor point.
translate_x = anchor_pt["left"][X] - rot_anchor_pt[X]
translate_y = anchor_pt["left"][Y] - rot_anchor_pt[Y]
# Make additional translation to bring the AL point to the correct position.
if side == "right":
# Translate AL to AR.
translate_x += box_width
translate_y -= box_height
elif side == "bottom":
# Translate AL to AB
translate_y -= box_height
elif side == "top":
# Translate AL to AT
translate_x += box_width
# Create the complete transformation matrix = rotation followed by translation.
transform[side] = (
Affine.translation(translate_x, translate_y) * transform[side]
)
# Also translate the point on each side that defines the box around the symbol.
box_pt[side] = transform[side] * box_pt[side]
# Draw the transformed pins for each side of the symbol.
for side, side_pins in list(unit.items()):
# If the pins are ordered by their row in the spreadsheet or by their name,
# then reverse their order on the right and top sides so they go from top-to-bottom
# on the right side and left-to-right on the top side instead of the opposite
# as happens with counter-clockwise pin-number ordering.
side_reverse = reverse
if sort_type in ["name", "row"] and side in ["right", "top"]:
side_reverse = not reverse
# Sort the pins for the desired order: row-wise, numeric (pin #), alphabetical (pin name).
sorted_side_pins = sorted(
list(side_pins.items()), key=pin_key_func, reverse=side_reverse
)
# Draw the transformed pins for this side of the symbol.
part_defn += draw_pins(
unit_num, sorted_side_pins, bbox[side], transform[side], side, push, fuzzy_match
)
# Create the box around the unit's pins.
part_defn += BOX.format(
x0=int(box_pt["left"][X]),
y0=int(box_pt["top"][Y]),
x1=int(box_pt["right"][X]),
y1=int(box_pt["bottom"][Y]),
unit_num=unit_num,
line_width=box_line_width,
fill=BOX_FILLS[fill],
)
# Close the section that holds the part's units.
part_defn += END_DRAW
# Close the part definition.
part_defn += END_DEF
# Return complete part symbol definition.
return part_defn
def is_pwr(pin, fuzzy_match):
"""Return true if this is a power input pin."""
return (
find_closest_match(name=pin.type, name_dict=PIN_TYPES, fuzzy_match=fuzzy_match)
== "W"
)
def do_bundling(pin_data, bundle, fuzzy_match):
"""Handle bundling for power pins. Unbundle everything else."""
for unit in list(pin_data.values()):
for side in list(unit.values()):
for name, pins in list(side.items()):
if len(pins) > 1:
for index, p in enumerate(pins):
if is_pwr(p, fuzzy_match) and bundle:
side[p.name + "_pwr"].append(p)
else:
side[p.name + "_" + str(index)].append(p)
del side[name]
def scan_for_readers():
"""Look for scripts for reading part description files."""
trailer = "_reader.py" # Reader file names always end with this.
readers = {}
for dir in [os.path.dirname(os.path.abspath(__file__)), "."]:
for f in os.listdir(dir):
if f.endswith(trailer):
reader_name = f.replace(trailer, "")
readers[reader_name] = dir
return readers
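# Example: a file named 'generic_reader.py' alongside this module registers a
# reader called 'generic' (the default for the --reader option below).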
def kipart(
part_reader,
part_data_file,
part_data_file_name,
part_data_file_type,
parts_lib,
fill,
box_line_width,
push,
allow_overwrite=False,
sort_type="name",
reverse=False,
fuzzy_match=False,
bundle=False,
debug_level=0,
):
"""Read part pin data from a CSV/text/Excel file and write or append it to a library file."""
# Get the part number and pin data from the CSV file.
for (
part_num,
part_ref_prefix,
part_footprint,
part_manf_num,
part_datasheet,
part_desc,
pin_data,
) in part_reader(part_data_file, part_data_file_name, part_data_file_type):
# Handle retaining/overwriting parts that are already in the library.
if parts_lib.get(part_num):
if allow_overwrite:
print("Overwriting part {}!".format(part_num))
else:
print("Retaining previous definition of part {}.".format(part_num))
continue
do_bundling(pin_data, bundle, fuzzy_match)
# Draw the schematic symbol into the library.
parts_lib[part_num] = draw_symbol(
part_num=part_num,
part_ref_prefix=part_ref_prefix,
part_footprint=part_footprint,
part_manf_num=part_manf_num,
part_datasheet=part_datasheet,
part_desc=part_desc,
pin_data=pin_data,
sort_type=sort_type,
reverse=reverse,
fuzzy_match=fuzzy_match,
fill=fill,
box_line_width=box_line_width,
push=push,
)
def read_lib_file(lib_file):
parts_lib = OrderedDict()
with open(lib_file, "r") as lib:
part_def = ""
for line in lib:
start = re.match("DEF (?P<part_name>\S+)", line)
end = re.match("ENDDEF$", line)
if start:
part_def = line
part_name = start.group("part_name")
elif end:
part_def += line
parts_lib[part_name] = part_def
else:
part_def += line
return parts_lib
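# Sketch of the parsed structure (hypothetical part name): a lib file holding
# a 'DEF LM555 ... ENDDEF' block yields
#   parts_lib == OrderedDict([('LM555', '<full DEF..ENDDEF text>')])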
def write_lib_file(parts_lib, lib_file):
print("Writing", lib_file, len(parts_lib))
LIB_HEADER = "EESchema-LIBRARY Version 2.3\n"
with open(lib_file, "w") as lib_fp:
lib_fp.write(LIB_HEADER)
for part_def in parts_lib.values():
lib_fp.write(part_def)
def call_kipart(args, part_reader, part_data_file, file_name, file_type, parts_lib):
"""Helper routine for calling kipart from main()."""
return kipart(
part_reader=part_reader,
part_data_file=part_data_file,
part_data_file_name=file_name,
part_data_file_type=file_type,
parts_lib=parts_lib,
fill=args.fill,
box_line_width=args.box_line_width,
push=args.push,
allow_overwrite=args.overwrite,
sort_type=args.sort,
reverse=args.reverse,
fuzzy_match=args.fuzzy_match,
bundle=args.bundle,
debug_level=args.debug,
)
def main():
# Get Python routines for reading part description/CSV files.
readers = scan_for_readers()
parser = ap.ArgumentParser(
description="Generate single & multi-unit schematic symbols for KiCad from a CSV file."
)
parser.add_argument(
"-v", "--version", action="version", version="KiPart " + __version__
)
parser.add_argument(
"input_files",
nargs="+",
type=str,
metavar="file.[csv|txt|xlsx|zip]",
help="Files for parts in CSV/text/Excel format or as such files in .zip archives.",
)
parser.add_argument(
"-r",
"--reader",
nargs="?",
type=lambda s: unicode(s).lower(),
choices=readers.keys(),
default="generic",
help="Name of function for reading the CSV or part description files.",
)
parser.add_argument(
"-s",
"--sort",
nargs="?",
# type=str.lower,
type=lambda s: unicode(s).lower(),
choices=["row", "num", "name"],
default="row",
help="Sort the part pins by their entry order in the CSV file, their pin number, or their pin name.",
)
parser.add_argument(
"--reverse", action="store_true", help="Sort pins in reverse order."
)
parser.add_argument(
"--side",
nargs="?",
# type=str.lower,
type=lambda s: unicode(s).lower(),
choices=["left", "right", "top", "bottom"],
default="left",
help="Which side to place the pins by default.",
)
parser.add_argument(
"--fill",
nargs="?",
type=lambda s: unicode(s).lower(),
choices=BOX_FILLS.keys(),
default=DEFAULT_BOX_FILL,
help="Select fill style for schematic symbol boxes.",
)
parser.add_argument(
"--box_line_width",
type=int,
default=DEFAULT_BOX_LINE_WIDTH,
help="Set line width of the schematic symbol box.",
)
parser.add_argument(
"--push",
type=float,
default=0.5,
help="Push pins left/up (0.0), center (0.5), or right/down(1.0) on the sides of the schematic symbol box."
)
parser.add_argument(
"-o",
"--output",
nargs="?",
type=str,
metavar="file.lib",
help="Generated KiCad symbol library for parts.",
)
parser.add_argument(
"-f",
"--fuzzy_match",
action="store_true",
help="Use approximate string matching when looking-up the pin type, style and orientation.",
)
parser.add_argument(
"-b",
"--bundle",
action="store_true",
help="Bundle multiple, identically-named power and ground pins each into a single schematic pin.",
)
parser.add_argument(
"-a",
"--append",
"--add",
action="store_true",
help="Add parts to an existing part library. Overwrite existing parts only if used in conjunction with -w.",
)
parser.add_argument(
"-w",
"--overwrite",
action="store_true",
help="Allow overwriting of an existing part library.",
)
parser.add_argument(
"-d",
"--debug",
nargs="?",
type=int,
default=0,
metavar="LEVEL",
help="Print debugging info. (Larger LEVEL means more info.)",
)
args = parser.parse_args()
# kipart f1.csv f2.csv # Create f1.lib, f2.lib
# kipart f1.csv f2.csv -w # Overwrite f1.lib, f2.lib
# kipart f1.csv f2.csv -a # Append to f1.lib, f2.lib
# kipart f1.csv f2.csv -o f.lib # Create f.lib
# kipart f1.csv f2.csv -w -o f.lib # Overwrite f.lib
# kipart f1.csv f2.csv -a -o f.lib # Append to f.lib
# Load the function for reading the part description file.
part_reader_name = args.reader + "_reader" # Name of the reader module.
reader_dir = readers[args.reader]
sys.path.append(reader_dir) # Import from dir where the reader is
if reader_dir == ".":
importlib.import_module(part_reader_name) # Import module.
reader_module = sys.modules[part_reader_name] # Get imported module.
else:
importlib.import_module("kipart." + part_reader_name) # Import module.
reader_module = sys.modules[
"kipart." + part_reader_name
] # Get imported module.
part_reader = getattr(reader_module, part_reader_name) # Get reader function.
DEFAULT_PIN.side = args.side
check_file_exists = True # Used to check for existence of a single output lib file.
for input_file in args.input_files:
        # Set up the output lib file; without -o, each input file generates its own .lib file.
if check_file_exists or not args.output:
output_file = args.output or os.path.splitext(input_file)[0] + ".lib"
if os.path.isfile(output_file):
# The output lib file already exists.
if args.overwrite:
# Overwriting an existing file, so ignore the existing parts.
parts_lib = OrderedDict()
elif args.append:
# Appending to an existing file, so read in existing parts.
parts_lib = read_lib_file(output_file)
else:
print(
"Output file {} already exists! Use the --overwrite option to replace it or the --append option to append to it.".format(
output_file
)
)
sys.exit(1)
else:
# Lib file doesn't exist, so create a new lib file starting with no parts.
parts_lib = OrderedDict()
            # Don't set up the output lib file again if the -o option was used to specify a single output lib.
check_file_exists = not args.output
file_ext = os.path.splitext(input_file)[-1].lower() # Get input file extension.
if file_ext == ".zip":
# Process the individual files inside a ZIP archive.
with zipfile.ZipFile(input_file, "r") as zip_file:
for zipped_file in zip_file.infolist():
zip_file_ext = os.path.splitext(zipped_file.filename)[-1]
if zip_file_ext in [".csv", ".txt"]:
                        # Process CSV/TXT files from the archive as text.
with zip_file.open(zipped_file, "r") as part_data_file:
part_data_file = io.TextIOWrapper(part_data_file)
call_kipart(
args,
part_reader,
part_data_file,
zipped_file.filename,
zip_file_ext,
parts_lib,
)
elif zip_file_ext in [".xlsx"]:
xlsx_data = zip_file.read(zipped_file)
part_data_file = io.BytesIO(xlsx_data)
call_kipart(
args,
part_reader,
part_data_file,
zipped_file.filename,
zip_file_ext,
parts_lib,
)
else:
# Skip unrecognized files.
continue
elif file_ext in [".csv", ".txt"]:
# Process CSV and TXT files.
with open(input_file, "r") as part_data_file:
call_kipart(
args, part_reader, part_data_file, input_file, file_ext, parts_lib
)
elif file_ext in [".xlsx"]:
# Process Excel files.
with open(input_file, "rb") as part_data_file:
call_kipart(
args, part_reader, part_data_file, input_file, file_ext, parts_lib
)
else:
# Skip unrecognized files.
continue
if not args.output:
# No global output lib file, so output a lib file for each input file.
write_lib_file(parts_lib, output_file)
if args.output:
# Only a single lib output file was given, so write library to it after all
# the input files were processed.
write_lib_file(parts_lib, output_file)
# Main entry point.
if __name__ == "__main__":
main()
|
|
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for Gnome/GLib related
functionality such as gobject-introspection and gresources.'''
import build
import os, sys
import subprocess
from coredata import MesonException
import mlog
class GnomeModule:
def compile_resources(self, state, args, kwargs):
cmd = ['glib-compile-resources', '@INPUT@', '--generate']
if 'source_dir' in kwargs:
d = os.path.join(state.build_to_src, state.subdir, kwargs.pop('source_dir'))
cmd += ['--sourcedir', d]
if 'c_name' in kwargs:
cmd += ['--c-name', kwargs.pop('c_name')]
cmd += ['--target', '@OUTPUT@']
kwargs['command'] = cmd
output_c = args[0] + '.c'
output_h = args[0] + '.h'
kwargs['input'] = args[1]
kwargs['output'] = output_c
target_c = build.CustomTarget(args[0]+'_c', state.subdir, kwargs)
kwargs['output'] = output_h
target_h = build.CustomTarget(args[0] + '_h', state.subdir, kwargs)
return [target_c, target_h]
def generate_gir(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gir takes one argument')
girtarget = args[0]
while hasattr(girtarget, 'held_object'):
girtarget = girtarget.held_object
if not isinstance(girtarget, (build.Executable, build.SharedLibrary)):
raise MesonException('Gir target must be an executable or shared library')
pkgstr = subprocess.check_output(['pkg-config', '--cflags', 'gobject-introspection-1.0'])
pkgargs = pkgstr.decode().strip().split()
ns = kwargs.pop('namespace')
nsversion = kwargs.pop('nsversion')
libsources = kwargs.pop('sources')
girfile = '%s-%s.gir' % (ns, nsversion)
depends = [girtarget]
scan_command = ['g-ir-scanner', '@INPUT@']
scan_command += pkgargs
scan_command += ['--namespace='+ns, '--nsversion=' + nsversion, '--warn-all',
'--output', '@OUTPUT@']
extra_args = kwargs.pop('extra_args', [])
if not isinstance(extra_args, list):
extra_args = [extra_args]
scan_command += extra_args
for incdirs in girtarget.include_dirs:
for incdir in incdirs.get_incdirs():
scan_command += ['-I%s' % os.path.join(state.environment.get_source_dir(), incdir)]
if 'link_with' in kwargs:
link_with = kwargs.pop('link_with')
if not isinstance(link_with, list):
link_with = [link_with]
for link in link_with:
lib = link.held_object
scan_command += ['-l%s' % lib.name]
if isinstance(lib, build.SharedLibrary):
scan_command += ['-L%s' %
os.path.join(state.environment.get_build_dir(),
lib.subdir)]
depends.append(lib)
if 'includes' in kwargs:
includes = kwargs.pop('includes')
if isinstance(includes, str):
scan_command += ['--include=%s' % includes]
elif isinstance(includes, list):
scan_command += ['--include=%s' % inc for inc in includes]
else:
raise MesonException('Gir includes must be str or list')
if state.global_args.get('c'):
scan_command += ['--cflags-begin']
scan_command += state.global_args['c']
scan_command += ['--cflags-end']
if kwargs.get('symbol_prefix'):
sym_prefix = kwargs.pop('symbol_prefix')
if not isinstance(sym_prefix, str):
raise MesonException('Gir symbol prefix must be str')
scan_command += ['--symbol-prefix=%s' % sym_prefix]
if kwargs.get('identifier_prefix'):
identifier_prefix = kwargs.pop('identifier_prefix')
if not isinstance(identifier_prefix, str):
raise MesonException('Gir identifier prefix must be str')
scan_command += ['--identifier-prefix=%s' % identifier_prefix]
if kwargs.get('export_packages'):
pkgs = kwargs.pop('export_packages')
if isinstance(pkgs, str):
scan_command += ['--pkg-export=%s' % pkgs]
elif isinstance(pkgs, list):
scan_command += ['--pkg-export=%s' % pkg for pkg in pkgs]
else:
raise MesonException('Gir export packages must be str or list')
deps = None
if 'dependencies' in kwargs:
deps = kwargs.pop('dependencies')
            if not isinstance(deps, list):
deps = [deps]
for dep in deps:
                girdir = dep.held_object.get_variable("girdir")
if girdir:
scan_command += ["--add-include-path=%s" % girdir]
inc_dirs = None
if kwargs.get('include_directories'):
inc_dirs = kwargs.pop('include_directories')
if not isinstance(inc_dirs, list):
inc_dirs = [inc_dirs]
for id in inc_dirs:
if isinstance(id.held_object, build.IncludeDirs):
scan_command += ['--add-include-path=%s' % inc for inc in id.held_object.get_incdirs()]
else:
raise MesonException('Gir include dirs should be include_directories()')
if isinstance(girtarget, build.Executable):
scan_command += ['--program', girtarget]
elif isinstance(girtarget, build.SharedLibrary):
scan_command += ["-L", os.path.join (state.environment.get_build_dir(), girtarget.subdir)]
libname = girtarget.get_basename()
scan_command += ['--library', libname]
scankwargs = {'output' : girfile,
'input' : libsources,
'command' : scan_command,
'depends' : depends,
}
if kwargs.get('install'):
scankwargs['install'] = kwargs['install']
scankwargs['install_dir'] = os.path.join(state.environment.get_datadir(), 'gir-1.0')
scan_target = GirTarget(girfile, state.subdir, scankwargs)
typelib_output = '%s-%s.typelib' % (ns, nsversion)
typelib_cmd = ['g-ir-compiler', scan_target, '--output', '@OUTPUT@']
if inc_dirs:
for id in inc_dirs:
typelib_cmd += ['--includedir=%s' % inc for inc in
id.held_object.get_incdirs()]
if deps:
for dep in deps:
                girdir = dep.held_object.get_variable("girdir")
if girdir:
typelib_cmd += ["--includedir=%s" % girdir]
kwargs['output'] = typelib_output
kwargs['command'] = typelib_cmd
# Note that this can't be libdir, because e.g. on Debian it points to
# lib/x86_64-linux-gnu but the girepo dir is always under lib.
kwargs['install_dir'] = 'lib/girepository-1.0'
typelib_target = TypelibTarget(typelib_output, state.subdir, kwargs)
return [scan_target, typelib_target]
def compile_schemas(self, state, args, kwargs):
if len(args) != 0:
raise MesonException('Compile_schemas does not take positional arguments.')
srcdir = os.path.join(state.build_to_src, state.subdir)
outdir = state.subdir
cmd = ['glib-compile-schemas', '--targetdir', outdir, srcdir]
kwargs['command'] = cmd
kwargs['input'] = []
kwargs['output'] = 'gschemas.compiled'
if state.subdir == '':
targetname = 'gsettings-compile'
else:
targetname = 'gsettings-compile-' + state.subdir
target_g = build.CustomTarget(targetname, state.subdir, kwargs)
return target_g
def gtkdoc(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gtkdoc must have one positional argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Gtkdoc arg must be string.')
        if 'src_dir' not in kwargs:
raise MesonException('Keyword argument src_dir missing.')
main_file = kwargs.get('main_sgml', '')
if not isinstance(main_file, str):
raise MesonException('Main sgml keyword argument must be a string.')
main_xml = kwargs.get('main_xml', '')
if not isinstance(main_xml, str):
raise MesonException('Main xml keyword argument must be a string.')
if main_xml != '':
if main_file != '':
raise MesonException('You can only specify main_xml or main_sgml, not both.')
main_file = main_xml
src_dir = kwargs['src_dir']
targetname = modulename + '-doc'
command = os.path.normpath(os.path.join(os.path.split(__file__)[0], "../gtkdochelper.py"))
if hasattr(src_dir, 'held_object'):
            src_dir = src_dir.held_object
if not isinstance(src_dir, build.IncludeDirs):
                raise MesonException('Invalid keyword argument for src_dir.')
incdirs = src_dir.get_incdirs()
if len(incdirs) != 1:
raise MesonException('Argument src_dir has more than one directory specified.')
header_dir = os.path.join(state.environment.get_source_dir(), src_dir.get_curdir(), incdirs[0])
else:
header_dir = os.path.normpath(os.path.join(state.subdir, src_dir))
args = [state.environment.get_source_dir(),
state.environment.get_build_dir(),
state.subdir,
header_dir,
main_file,
modulename]
res = [build.RunTarget(targetname, command, args, state.subdir)]
if kwargs.get('install', True):
res.append(build.InstallScript([command] + args))
return res
def gdbus_codegen(self, state, args, kwargs):
if len(args) != 2:
raise MesonException('Gdbus_codegen takes two arguments, name and xml file.')
namebase = args[0]
xml_file = args[1]
cmd = ['gdbus-codegen']
if 'interface_prefix' in kwargs:
cmd += ['--interface-prefix', kwargs.pop('interface_prefix')]
if 'namespace' in kwargs:
cmd += ['--c-namespace', kwargs.pop('namespace')]
cmd += ['--generate-c-code', os.path.join(state.subdir, namebase), '@INPUT@']
outputs = [namebase + '.c', namebase + '.h']
custom_kwargs = {'input' : xml_file,
'output' : outputs,
'command' : cmd
}
return build.CustomTarget(namebase + '-gdbus', state.subdir, custom_kwargs)
def initialize():
mlog.log('Warning, glib compiled dependencies will not work until this upstream issue is fixed:',
mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=745754'))
return GnomeModule()
class GirTarget(build.CustomTarget):
def __init__(self, name, subdir, kwargs):
super().__init__(name, subdir, kwargs)
class TypelibTarget(build.CustomTarget):
def __init__(self, name, subdir, kwargs):
super().__init__(name, subdir, kwargs)
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
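# Illustrative sketch (assumption, not part of the upstream file): CompactSize
# uses 1, 3, 5, or 9 bytes depending on magnitude, so values on both sides of
# each threshold should round-trip through a BytesIO buffer unchanged.
def _compact_size_roundtrip_demo():
    for value in (0, 252, 253, 0xFFFF, 0x10000, 0xFFFFFFFF, 0x100000000):
        assert deser_compact_size(BytesIO(ser_compact_size(value))) == value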
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
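# Illustrative sketch (assumption, not part of the upstream file): uint256
# values serialize as eight little-endian 32-bit words (32 bytes total), and
# uint256_from_compact() expands the nBits encoding; for the well-known
# genesis value 0x1d00ffff the target works out to 0xffff << 208.
def _uint256_demo():
    value = 0xdeadbeef << 128
    assert len(ser_uint256(value)) == 32
    assert deser_uint256(BytesIO(ser_uint256(value))) == value
    assert uint256_from_compact(0x1d00ffff) == 0xffff << 208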
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
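# Illustrative sketch (assumption, not part of the upstream file): passing a
# ser_function_name serializes each element with that method instead of
# serialize(), e.g. to force the non-witness transaction encoding.
def _ser_vector_demo():
    txs = [CTransaction()]
    assert ser_vector(txs, "serialize_without_witness") == \
        ser_compact_size(1) + txs[0].serialize_without_witness()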
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
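# Illustrative sketch (assumption, not part of the upstream file): any object
# with serialize()/deserialize() methods round-trips through its hex form,
# just as it would when passed to or returned from RPC.
def _hex_roundtrip_demo():
    outpoint = COutPoint(hash=0xdeadbeef, n=1)
    assert FromHex(COutPoint(), ToHex(outpoint)).serialize() == outpoint.serialize()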
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.time = 0
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f, with_time=True):
if with_time:
self.time = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, with_time=True):
r = b""
if with_time:
r += struct.pack("<i", self.time)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness():
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness():
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness():
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
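# Illustrative sketch (assumption, not part of the upstream file): because
# CTxWitness omits the outer vector length, serializing two per-input stacks
# yields just their concatenation, with no count prefix in front.
def _tx_witness_demo():
    wit = CTxWitness()
    wit.vtxinwit = [CTxInWitness(), CTxInWitness()]
    wit.vtxinwit[0].scriptWitness.stack = [b"\x01"]
    assert wit.serialize() == ser_string_vector([b"\x01"]) + ser_string_vector([])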
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
            # A zero-length vin is the BIP144 segwit marker byte; the
            # following (flag) byte must then be non-zero, which
            # matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
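# Illustrative sketch (assumption, not part of the upstream file): for a
# two-leaf tree, get_merkle_root() reduces to hash256 over the concatenation
# of the two serialized hashes (odd-length levels duplicate the last entry).
def _merkle_root_demo():
    left = ser_uint256(1)
    right = ser_uint256(2)
    assert CBlock.get_merkle_root([left, right]) == \
        uint256_from_str(hash256(left + right))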
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
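# Illustrative sketch (assumption, not part of the upstream file): the shortid
# is the low 48 bits of SipHash-2-4 over the (w)txid, keyed by the first 16
# bytes of sha256(header || nonce), so it always fits in 6 wire bytes.
def _shortid_demo():
    k0, k1 = HeaderAndShortIDs().get_siphash_keys()
    assert calculate_shortid(k0, k1, 0xdeadbeef) < (1 << 48)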
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, False)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(False)
r += self.addrFrom.serialize(False)
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a mutable default argument: create a fresh transaction per message.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
|
import mock
import pytest
from addict import Dict
from paasta_tools import utils
from paasta_tools.frameworks import adhoc_scheduler
from paasta_tools.frameworks import native_scheduler
from paasta_tools.frameworks.native_service_config import NativeServiceConfig
from paasta_tools.frameworks.native_service_config import UnknownNativeServiceError
from paasta_tools.frameworks.task_store import DictTaskStore
@pytest.fixture
def system_paasta_config():
return utils.SystemPaastaConfig(
{
"docker_registry": "fake",
"volumes": [],
}, "/fake/system/configs",
)
def make_fake_offer(cpu=50000, mem=50000, port_begin=31000, port_end=32000, pool='default'):
offer = Dict(
agent_id=Dict(value='super_big_slave'),
resources=[
Dict(
name='cpus',
scalar=Dict(value=cpu),
),
Dict(
name='mem',
scalar=Dict(value=mem),
),
Dict(
name='ports',
ranges=Dict(
range=[Dict(begin=port_begin, end=port_end)],
),
),
],
attributes=[],
)
if pool is not None:
offer.attributes = [
Dict(name='pool', text=Dict(value=pool)),
]
return offer
class TestAdhocScheduler(object):
def test_raise_error_when_cmd_missing(self, system_paasta_config):
service_name = "service_name"
instance_name = "instance_name"
cluster = "cluster"
service_configs = [
NativeServiceConfig(
service=service_name,
instance=instance_name,
cluster=cluster,
config_dict={
"cpus": 0.1,
"mem": 50,
"instances": 3,
"drain_method": "test",
},
branch_dict={
'docker_image': 'busybox',
'desired_state': 'start',
},
soa_dir='/nail/etc/services',
),
]
with pytest.raises(UnknownNativeServiceError):
adhoc_scheduler.AdhocScheduler(
service_name=service_name,
instance_name=instance_name,
cluster=cluster,
system_paasta_config=system_paasta_config,
service_config=service_configs[0],
dry_run=False,
reconcile_start_time=0,
staging_timeout=30,
task_store_type=DictTaskStore,
)
@mock.patch('paasta_tools.frameworks.native_scheduler._log', autospec=True)
def test_can_only_launch_task_once(self, mock_log, system_paasta_config):
service_name = "service_name"
instance_name = "instance_name"
cluster = "cluster"
service_configs = [
NativeServiceConfig(
service=service_name,
instance=instance_name,
cluster=cluster,
config_dict={
"cpus": 0.1,
"mem": 50,
"instances": 3,
"cmd": 'sleep 50',
"drain_method": "test",
},
branch_dict={
'docker_image': 'busybox',
'desired_state': 'start',
'force_bounce': None,
},
soa_dir='/nail/etc/services',
),
]
scheduler = adhoc_scheduler.AdhocScheduler(
service_name=service_name,
instance_name=instance_name,
cluster=cluster,
system_paasta_config=system_paasta_config,
service_config=service_configs[0],
dry_run=False,
reconcile_start_time=0,
staging_timeout=30,
task_store_type=DictTaskStore,
)
fake_driver = mock.Mock()
scheduler.registered(
driver=fake_driver,
frameworkId={'value': 'foo'},
masterInfo=mock.Mock(),
)
with mock.patch(
'paasta_tools.utils.load_system_paasta_config', autospec=True,
return_value=system_paasta_config,
):
# Check that offers with invalid pool don't get accepted
tasks, _ = scheduler.tasks_and_state_for_offer(
fake_driver, make_fake_offer(pool='notdefault'), {},
)
assert len(tasks) == 0
tasks, _ = scheduler.tasks_and_state_for_offer(
fake_driver, make_fake_offer(pool=None), {},
)
assert len(tasks) == 0
tasks = scheduler.launch_tasks_for_offers(fake_driver, [make_fake_offer()])
task_id = tasks[0]['task_id']['value']
task_name = tasks[0]['name']
assert len(scheduler.task_store.get_all_tasks()) == 1
assert len(tasks) == 1
assert scheduler.need_more_tasks(task_name, scheduler.task_store.get_all_tasks(), []) is False
assert scheduler.need_to_stop() is False
no_tasks = scheduler.launch_tasks_for_offers(fake_driver, [make_fake_offer()])
assert len(scheduler.task_store.get_all_tasks()) == 1
assert len(no_tasks) == 0
assert scheduler.need_to_stop() is False
scheduler.statusUpdate(
fake_driver,
{'task_id': {'value': task_id}, 'state': native_scheduler.TASK_FINISHED},
)
assert len(scheduler.task_store.get_all_tasks()) == 1
assert scheduler.need_to_stop() is True
@mock.patch('paasta_tools.frameworks.native_scheduler._log', autospec=True)
def test_can_run_multiple_copies(self, mock_log, system_paasta_config):
service_name = "service_name"
instance_name = "instance_name"
cluster = "cluster"
service_configs = [
NativeServiceConfig(
service=service_name,
instance=instance_name,
cluster=cluster,
config_dict={
"cpus": 0.1,
"mem": 50,
"instances": 3,
"cmd": 'sleep 50',
"drain_method": "test",
},
branch_dict={
'docker_image': 'busybox',
'desired_state': 'start',
'force_bounce': None,
},
soa_dir='/nail/etc/services',
),
]
scheduler = adhoc_scheduler.AdhocScheduler(
service_name=service_name,
instance_name=instance_name,
cluster=cluster,
system_paasta_config=system_paasta_config,
service_config=service_configs[0],
dry_run=False,
reconcile_start_time=0,
staging_timeout=30,
service_config_overrides={'instances': 5},
task_store_type=DictTaskStore,
)
fake_driver = mock.Mock()
scheduler.registered(
driver=fake_driver,
frameworkId={'value': 'foo'},
masterInfo=mock.Mock(),
)
with mock.patch(
'paasta_tools.utils.load_system_paasta_config', autospec=True,
return_value=system_paasta_config,
):
tasks = scheduler.launch_tasks_for_offers(fake_driver, [make_fake_offer()])
task_name = tasks[0]['name']
task_ids = [t['task_id']['value'] for t in tasks]
assert len(scheduler.task_store.get_all_tasks()) == 5
assert len(tasks) == 5
assert scheduler.need_more_tasks(task_name, scheduler.task_store.get_all_tasks(), []) is False
assert scheduler.need_to_stop() is False
no_tasks = scheduler.launch_tasks_for_offers(fake_driver, [make_fake_offer()])
assert len(scheduler.task_store.get_all_tasks()) == 5
assert len(no_tasks) == 0
assert scheduler.need_to_stop() is False
for idx, task_id in enumerate(task_ids):
scheduler.statusUpdate(
fake_driver,
{'task_id': {'value': task_id}, 'state': native_scheduler.TASK_FINISHED},
)
assert scheduler.need_to_stop() is (idx == 4)
|
|
try:
from unittest2 import TestCase
from mock import Mock, mock
except ImportError:
from unittest import TestCase
from mock import Mock, mock
import six
from cfn_sphere.exceptions import TemplateErrorException
from cfn_sphere.template import CloudFormationTemplate
from cfn_sphere.template.transformer import CloudFormationTemplateTransformer
class CloudFormationTemplateTransformerTests(TestCase):
def test_scan_dict_keys_executes_key_handler_for_all_matching_keys(self):
dictionary = {'key': 'value'}
handler = Mock()
handler.return_value = 'new-key', 'new-value'
result = CloudFormationTemplateTransformer.scan_dict_keys(dictionary, handler)
expected_calls = [mock.call('key', 'value')]
six.assertCountEqual(self, expected_calls, handler.mock_calls)
self.assertDictEqual(result, {'new-key': 'new-value'})
def test_scan_dict_values_executes_value_handler_for_all_matching_prefixes(self):
dictionary = {'a': 'foo123', 'b': {'c': 'foo234'}}
handler = Mock()
handler.return_value = "foo"
result = CloudFormationTemplateTransformer.scan_dict_values(dictionary, handler)
expected_calls = [mock.call('foo123'), mock.call('foo234')]
six.assertCountEqual(self, expected_calls, handler.mock_calls)
six.assertCountEqual(self, result, {'a': 'foo', 'b': {'c': 'foo'}})
def test_transform_dict_to_yaml_lines_list(self):
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list({'my-key': 'my-value'})
self.assertEqual([{'Fn::Join': [': ', ['my-key', 'my-value']]}], result)
def test_transform_dict_to_yaml_lines_list_indents_sub_dicts(self):
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list(
{'my-key': {'my-sub-key': 'value'}})
self.assertEqual(['my-key:', {'Fn::Join': [': ', [' my-sub-key', 'value']]}], result)
def test_transform_dict_to_yaml_lines_list_accepts_integer_values(self):
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list({'my-key': 3})
self.assertEqual([{'Fn::Join': [': ', ['my-key', 3]]}], result)
def test_transform_dict_to_yaml_lines_list_accepts_list_values(self):
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list({'my-key': ['a', 'b']})
self.assertEqual(['my-key:', '- a', '- b'], result)
def test_transform_join_key_creates_valid_cfn_join(self):
result = CloudFormationTemplateTransformer.transform_join_key('|join|-', ['a', 'b'])
self.assertEqual(('Fn::Join', ['-', ['a', 'b']]), result)
def test_transform_join_key_accepts_empty_join_string(self):
result = CloudFormationTemplateTransformer.transform_join_key('|join|', ['a', 'b'])
self.assertEqual(('Fn::Join', ['', ['a', 'b']]), result)
def test_transform_join_key_creates_valid_cfn_join_with_multiple_strings(self):
result = CloudFormationTemplateTransformer.transform_join_key('|join|-', ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(('Fn::Join', ['-', ['a', 'b', 'c', 'd', 'e']]), result)
def test_transform_reference_string_creates_valid_cfn_reference(self):
result = CloudFormationTemplateTransformer.transform_reference_string('|ref|my-value')
self.assertEqual({'Ref': 'my-value'}, result)
def test_transform_reference_string_ignores_value_without_reference(self):
result = CloudFormationTemplateTransformer.transform_reference_string('my-value')
self.assertEqual('my-value', result)
def test_transform_reference_string_raises_exception_on_empty_reference(self):
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.transform_reference_string('|ref|')
def test_transform_reference_string_ignores_none_values(self):
result = CloudFormationTemplateTransformer.transform_reference_string(None)
self.assertEqual(None, result)
def test_transform_reference_string_ignores_empty_strings(self):
result = CloudFormationTemplateTransformer.transform_reference_string('')
self.assertEqual('', result)
def test_transform_getattr_string_creates_valid_cfn_getattr(self):
result = CloudFormationTemplateTransformer.transform_getattr_string('|getatt|resource|attribute')
self.assertEqual({'Fn::GetAtt': ['resource', 'attribute']}, result)
def test_transform_getattr_string_raises_exception_on_missing_resource(self):
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.transform_getattr_string('|getatt|attribute')
def test_transform_getattr_string_ignores_none_values(self):
result = CloudFormationTemplateTransformer.transform_getattr_string(None)
self.assertEqual(None, result)
def test_transform_getattr_string_ignores_empty_strings(self):
result = CloudFormationTemplateTransformer.transform_getattr_string('')
self.assertEqual('', result)
def test_transform_taupage_user_data_key(self):
input = {
"application_id": "stackName",
"application_version": "imageVersion",
"environment": {
"SSO_KEY": "mySsoKey",
"QUEUE_URL": {"ref": "myQueueUrl"}
}
}
expected = {'Fn::Base64':
{
'Fn::Join':
['\n', ['#taupage-ami-config',
{'Fn::Join': [': ', ['application_id', 'stackName']]},
{'Fn::Join': [': ', ['application_version', 'imageVersion']]},
'environment:',
{'Fn::Join': [': ', [' QUEUE_URL', {'ref': 'myQueueUrl'}]]},
{'Fn::Join': [': ', [' SSO_KEY', 'mySsoKey']]}]
]
}
}
key, value = CloudFormationTemplateTransformer.transform_taupage_user_data_key('@taupageUserData@', input)
self.assertEqual("UserData", key)
self.assertEqual(expected, value)
def test_transform_yaml_user_data_key(self):
input = {
"application_id": "stackName",
"application_version": "imageVersion",
"environment": {
"SSO_KEY": "mySsoKey",
"QUEUE_URL": {"ref": "myQueueUrl"}
}
}
expected = {'Fn::Base64':
{'Fn::Join': ['\n', [
{'Fn::Join': [': ', ['application_id', 'stackName']]},
{'Fn::Join': [': ', ['application_version', 'imageVersion']]},
'environment:',
{'Fn::Join': [': ', [' QUEUE_URL', {'ref': 'myQueueUrl'}]]},
{'Fn::Join': [': ', [' SSO_KEY', 'mySsoKey']]}]]
}
}
key, value = CloudFormationTemplateTransformer.transform_yaml_user_data_key('@YamlUserData@', input)
self.assertEqual("UserData", key)
self.assertEqual(expected, value)
def test_transform_dict_to_yaml_lines_list_accepts_multiple_sub_dicts(self):
input = {
"foo": {
'baa': {'key': 'value'}
}
}
expected = [
'foo:',
' baa:',
{'Fn::Join': [': ', [' key', 'value']]}
]
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list(input)
six.assertCountEqual(self, expected, result)
def test_transform_dict_to_yaml_lines_list_accepts_int_key_value(self):
input = {'ports': {8080: 9000}}
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list(input)
expected = [
"ports:",
{"Fn::Join": [": ", [" 8080", 9000]]}
]
six.assertCountEqual(self, expected, result)
def test_transform_dict_to_yaml_lines_list_accepts_joins(self):
input = {
"source": {"Fn::Join": [":", ["my-registry/my-app", {"Ref": "appVersion"}]]}
}
expected = [
{
"Fn::Join": [
": ",
[
"source",
{
"Fn::Join": [
":",
[
"my-registry/my-app",
{
"Ref": "appVersion"
}
]
]
}
]
]
}
]
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list(input)
six.assertCountEqual(self, expected, result)
def test_transform_dict_to_yaml_lines_list_returns_stable_order(self):
input = {'d': 'd', 'a': 'a', 'e': 'e', 'b': {'f': 'f', 'c': 'c', 'a': 'a'}, "#": "3"}
expected = [{'Fn::Join': [': ', ['#', '3']]},
{'Fn::Join': [': ', ['a', 'a']]},
'b:',
{'Fn::Join': [': ', [' a', 'a']]},
{'Fn::Join': [': ', [' c', 'c']]},
{'Fn::Join': [': ', [' f', 'f']]},
{'Fn::Join': [': ', ['d', 'd']]},
{'Fn::Join': [': ', ['e', 'e']]}]
result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list(input)
self.assertEqual(expected, result)
def test_transform_kv_to_cfn_join_accepts_int_key_value(self):
result = CloudFormationTemplateTransformer.transform_kv_to_cfn_join(8080, 9000)
expected = {'Fn::Join': [': ', [8080, 9000]]}
self.assertEqual(expected, result)
def test_transform_kv_to_cfn_join_quotes_strings_with_colons(self):
result = CloudFormationTemplateTransformer.transform_kv_to_cfn_join('f:b', 'foo:baa')
expected = {'Fn::Join': [': ', ["'f:b'", "'foo:baa'"]]}
self.assertEqual(expected, result)
def test_transform_template_properly_renders_dict(self):
template_dict = {
'Resources': {
'key1': '|ref|value',
'key2': '|getatt|resource|attribute',
'@TaupageUserData@':
{
'key1': 'value',
'key2': {'ref': 'value'},
'key3': {'|join|.': ['a', 'b', 'c']}
}
}}
result = CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
expected = {
"key1": {
"Ref": "value"
},
"key2": {
"Fn::GetAtt": [
"resource",
"attribute"
]
},
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"\n",
[
"#taupage-ami-config",
{
"Fn::Join": [
": ",
[
"key1",
"value"
]
]
},
{
"Fn::Join": [
": ",
[
"key2",
{
"ref": "value"
}
]
]
},
{
"Fn::Join": [
": ",
[
"key3",
{
"Fn::Join": [
".",
[
"a",
"b",
"c"
]
]
}
]
]
}
]
]
}
}
}
six.assertCountEqual(self, expected, result.resources)
def test_transform_template_transforms_references_in_conditions_section(self):
template_dict = {
'Conditions': {'key1': ["|ref|foo", "a", "b"], "key2": "|Ref|baa"}
}
result = CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
expected = {'key1': [{'Ref': 'foo'}, 'a', 'b'], 'key2': {'Ref': 'baa'}}
self.assertEqual(expected, result.conditions)
def test_transform_template_transforms_list_values(self):
template_dict = {
'Resources': {'key1': ["|ref|foo", "a", "b"]}
}
result = CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
expected = {'key1': [{'Ref': 'foo'}, 'a', 'b']}
self.assertEqual(expected, result.resources)
def test_transform_template_transforms_dict_list_items(self):
template_dict = {
'Resources': {'key1': {'key2': [{'key3': 'value3', 'foo': {'|Join|': ['a', 'b']}}]}}
}
result = CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
expected = {'key1': {'key2': [{'foo': {'Fn::Join': ['', ['a', 'b']]}, 'key3': 'value3'}]}}
six.assertCountEqual(self, expected, result.resources)
def test_transform_template_transforms_join_with_embedded_ref(self):
template_dict = {
'Resources': {'key1': {"|join|.": ["|ref|foo", "b"]}}
}
result = CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
expected = {'key1': {'Fn::Join': ['.', [{'Ref': 'foo'}, 'b']]}}
self.assertEqual(expected, result.resources)
def test_transform_template_raises_exception_on_unknown_reference_value(self):
template_dict = {
'Resources': {'key1': "|foo|foo"}
}
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
def test_transform_template_raises_exception_on_unknown_reference_key(self):
template_dict = {
'Resources': {'|key|': "foo"}
}
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
def test_transform_template_raises_exception_on_unknown_at_reference_key(self):
template_dict = {
'Resources': {'@foo@': "foo"}
}
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
def test_transform_template_raises_exception_on_embedded_reference(self):
template_dict = {
'Resources': {'key1': {"foo": ["|foo|foo", "b"]}}
}
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.transform_template(CloudFormationTemplate(template_dict, 'foo'))
def test_check_for_leftover_reference_values_raises_exception_on_existing_reference(self):
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.check_for_leftover_reference_values('|Ref|foo')
def test_check_for_leftover_reference_values_raises_exception_on_references_in_list_values(self):
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.check_for_leftover_reference_values(['a', '|Ref|foo', 'b'])
def test_check_for_leftover_reference_values_properly_returns_values_without_reference(self):
self.assertEqual('foo', CloudFormationTemplateTransformer.check_for_leftover_reference_values('foo'))
def test_check_for_leftover_reference_values_properly_returns_empty_values(self):
self.assertEqual('', CloudFormationTemplateTransformer.check_for_leftover_reference_values(''))
def test_check_for_leftover_reference_keys_raises_exception_on_existing_at_reference(self):
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.check_for_leftover_reference_keys('@Foo@', 'foo')
def test_check_for_leftover_reference_keys_raises_exception_on_existing_pipe_reference(self):
with self.assertRaises(TemplateErrorException):
CloudFormationTemplateTransformer.check_for_leftover_reference_keys('|foo|', 'foo')
def test_check_for_leftover_reference_keys_properly_returns_values_without_reference(self):
self.assertEqual(('key', 'value'),
CloudFormationTemplateTransformer.check_for_leftover_reference_keys('key', 'value'))
def test_check_for_leftover_reference_keys_properly_returns_empty_values(self):
self.assertEqual(('', ''), CloudFormationTemplateTransformer.check_for_leftover_reference_keys('', ''))
|
|
import hashlib
import json
import os
import subprocess
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.http import (HttpResponse, HttpResponseBadRequest,
HttpResponseNotFound, HttpResponseServerError)
from django.shortcuts import render
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token
from django.views.decorators.http import etag, require_POST
from django.views.generic.base import TemplateView
import commonware.log
import jingo_minify
import waffle
from jingo.helpers import urlparams
from django_statsd.clients import statsd
from django_statsd.views import record as django_statsd_record
from mkt.carriers import get_carrier
from mkt.detail.views import manifest as mini_manifest
from mkt.site import monitors
from mkt.site.context_processors import get_collect_timings
from mkt.site.helpers import media
from mkt.site.utils import log_cef
log = commonware.log.getLogger('z.mkt.site')
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore needs @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def handler403(request):
# TODO: Bug 793241 for different 403 templates at different URL paths.
return render(request, 'site/403.html', status=403)
def handler404(request):
if request.path_info.startswith('/api/'):
# Pass over to API handler404 view if API was targeted.
return HttpResponseNotFound()
else:
return render(request, 'site/404.html', status=404)
def handler500(request):
if request.path_info.startswith('/api/'):
# Pass over to API handler500 view if API was targeted.
return HttpResponseServerError()
else:
return render(request, 'site/500.html', status=500)
def csrf_failure(request, reason=''):
return render(request, 'site/403.html',
{'because_csrf': 'CSRF' in reason}, status=403)
def manifest(request):
ctx = RequestContext(request)
data = {
'name': getattr(settings, 'WEBAPP_MANIFEST_NAME',
'Firefox Marketplace'),
'description': 'The Firefox Marketplace',
'developer': {
'name': 'Mozilla',
'url': 'http://mozilla.org',
},
'icons': {
# Using the default addon image until we get a marketplace logo.
'128': media(ctx, 'img/mkt/logos/128.png'),
'64': media(ctx, 'img/mkt/logos/64.png'),
'32': media(ctx, 'img/mkt/logos/32.png'),
},
'activities': {
'marketplace-app': {'href': '/'},
'marketplace-app-rating': {'href': '/'},
'marketplace-category': {'href': '/'},
'marketplace-search': {'href': '/'},
}
}
if get_carrier():
data['launch_path'] = urlparams('/', carrier=get_carrier())
manifest_content = json.dumps(data)
manifest_etag = hashlib.sha256(manifest_content).hexdigest()
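    # Deriving the ETag from the body means clients revalidate only when the
    # manifest JSON actually changes; the inner view exists purely so the
    # @etag decorator can wrap this one request.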
@etag(lambda r: manifest_etag)
def _inner_view(request):
response = HttpResponse(
manifest_content,
content_type='application/x-web-app-manifest+json')
return response
return _inner_view(request)
def serve_contribute(request):
filename = os.path.join(settings.ROOT, 'contribute.json')
with open(filename) as fd:
content = fd.read()
return HttpResponse(content, content_type='application/json')
def package_minifest(request):
"""Serve mini manifest ("minifest") for Yulelog's packaged `.zip`."""
if not settings.MARKETPLACE_GUID:
return HttpResponseNotFound()
return mini_manifest(request, settings.MARKETPLACE_GUID)
def yogafire_minifest(request):
"""Serve mini manifest ("minifest") for Yogafire's packaged `.zip`."""
if not settings.YOGAFIRE_GUID:
return HttpResponseNotFound()
return mini_manifest(request, settings.YOGAFIRE_GUID)
def robots(request):
"""Generate a `robots.txt`."""
template = render(request, 'site/robots.txt')
return HttpResponse(template, content_type='text/plain')
@csrf_exempt
@require_POST
def record(request):
# The rate limiting is done up on the client, but if things go wrong
# we can just turn the percentage down to zero.
if get_collect_timings():
return django_statsd_record(request)
raise PermissionDenied
@statsd.timer('mkt.mozmarket.minify')
def minify_js(js):
if settings.UGLIFY_BIN:
log.info('minifying JS with uglify')
return _minify_js_with_uglify(js)
else:
        # The YUI fallback is important because the YUI compressor is
        # bundled with jingo-minify and therefore needs no extra deps.
log.info('minifying JS with YUI')
return _minify_js_with_yui(js)
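# Illustrative sketch of calling the minifier (the JS snippet is made up;
# requires UGLIFY_BIN or JAVA_BIN to be configured in settings):
#
#     minified = minify_js('function add(a, b) { return a + b; }')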
def _minify_js_with_uglify(js):
sp = _open_pipe([settings.UGLIFY_BIN])
js, err = sp.communicate(js)
if sp.returncode != 0:
raise ValueError('Compressing JS with uglify failed; error: %s'
% err.strip())
return js
def _minify_js_with_yui(js):
jar = os.path.join(os.path.dirname(jingo_minify.__file__), 'bin',
'yuicompressor-2.4.7.jar')
if not os.path.exists(jar):
raise ValueError('Could not find YUI compressor; tried %r' % jar)
sp = _open_pipe([settings.JAVA_BIN, '-jar', jar, '--type', 'js',
'--charset', 'utf8'])
js, err = sp.communicate(js)
if sp.returncode != 0:
raise ValueError('Compressing JS with YUI failed; error: %s'
% err.strip())
return js
def _open_pipe(cmd):
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
class OpensearchView(TemplateView):
content_type = 'text/xml'
template_name = 'mkt/opensearch.xml'
@never_cache
def monitor(request, format=None):
# For each check, a boolean pass/fail status to show in the template
status_summary = {}
results = {}
checks = ['memcache', 'libraries', 'elastic', 'package_signer', 'path',
'receipt_signer', 'settings_check', 'solitude']
for check in checks:
with statsd.timer('monitor.%s' % check) as timer:
status, result = getattr(monitors, check)()
        # `status` is a string; if it is empty, the check passed.
status_summary[check] = {'state': not status,
'status': status}
results['%s_results' % check] = result
results['%s_timer' % check] = timer.ms
# If anything broke, send HTTP 500.
status_code = 200 if all(a['state']
for a in status_summary.values()) else 500
if format == '.json':
return HttpResponse(json.dumps(status_summary), status=status_code)
ctx = {}
ctx.update(results)
ctx['status_summary'] = status_summary
return render(request, 'services/monitor.html', ctx, status=status_code)
def loaded(request):
return HttpResponse('%s' % request.META['wsgi.loaded'],
content_type='text/plain')
@csrf_exempt
@require_POST
def cspreport(request):
"""Accept CSP reports and log them."""
report = ('blocked-uri', 'violated-directive', 'original-policy')
if not waffle.sample_is_active('csp-store-reports'):
return HttpResponse()
try:
v = json.loads(request.body)['csp-report']
# If possible, alter the PATH_INFO to contain the request of the page
# the error occurred on, spec: http://mzl.la/P82R5y
meta = request.META.copy()
meta['PATH_INFO'] = v.get('document-uri', meta['PATH_INFO'])
v = [(k, v[k]) for k in report if k in v]
log_cef('CSPViolation', 5, meta,
signature='CSPREPORT',
msg='A client reported a CSP violation',
cs6=v, cs6Label='ContentPolicy')
except (KeyError, ValueError), e:
log.debug('Exception in CSP report: %s' % e, exc_info=True)
return HttpResponseBadRequest()
return HttpResponse()
|
|
# All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
__all__ = ('RawField', 'ListField', 'DictField', 'SetField',
'BlobField', 'EmbeddedModelField')
class _HandleAssignment(object):
"""
    A descriptor that converts values via the field's to_python() whenever
    the attribute is assigned on a model instance.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
class RawField(models.Field):
""" Generic field to store anything your database backend allows you to. """
def get_internal_type(self):
return 'RawField'
class AbstractIterableField(models.Field):
"""
Abstract field for fields for storing iterable data type like ``list``,
``set`` and ``dict``.
You can pass an instance of a field as the first argument.
If you do, the iterable items will be piped through the passed field's
validation and conversion routines, converting the items to the
appropriate data type.
"""
def __init__(self, item_field=None, *args, **kwargs):
if item_field is None:
item_field = RawField()
self.item_field = item_field
default = kwargs.get('default', None if kwargs.get('null') else ())
if default is not None and not callable(default):
# ensure a new object is created every time the default is accessed
kwargs['default'] = lambda: self._type(default)
super(AbstractIterableField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name):
self.item_field.model = cls
self.item_field.name = name
super(AbstractIterableField, self).contribute_to_class(cls, name)
        metaclass = getattr(self.item_field, '__metaclass__', None)
        if metaclass is not None and issubclass(metaclass, models.SubfieldBase):
setattr(cls, self.name, _HandleAssignment(self))
def db_type(self, connection):
item_db_type = self.item_field.db_type(connection=connection)
return '%s:%s' % (self.__class__.__name__, item_db_type)
def _convert(self, func, values, *args, **kwargs):
if isinstance(values, (list, tuple, set)):
return self._type(func(value, *args, **kwargs) for value in values)
return values
def to_python(self, value):
return self._convert(self.item_field.to_python, value)
def pre_save(self, model_instance, add):
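        # item_field.pre_save() needs a model instance to read the value
        # from, so build a minimal throwaway stand-in below.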
class fake_instance(object):
pass
fake_instance = fake_instance()
def wrapper(value):
assert not hasattr(self.item_field, 'attname')
fake_instance.value = value
self.item_field.attname = 'value'
try:
return self.item_field.pre_save(fake_instance, add)
finally:
del self.item_field.attname
return self._convert(wrapper, getattr(model_instance, self.attname))
def get_db_prep_value(self, value, connection, prepared=False):
return self._convert(self.item_field.get_db_prep_value, value,
connection=connection, prepared=prepared)
def get_db_prep_save(self, value, connection):
return self._convert(self.item_field.get_db_prep_save,
value, connection=connection)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# TODO/XXX: Remove as_lookup_value() once we have a cleaner solution
# for dot-notation queries
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return self.item_field.get_db_prep_lookup(lookup_type, value,
connection=connection, prepared=prepared)
def validate(self, values, model_instance):
try:
iter(values)
except TypeError:
raise ValidationError('Value of type %r is not iterable' % type(values))
def formfield(self, **kwargs):
raise NotImplementedError('No form field implemented for %r' % type(self))
class ListField(AbstractIterableField):
"""
Field representing a Python ``list``.
If the optional keyword argument `ordering` is given, it must be a callable
that is passed to :meth:`list.sort` as `key` argument. If `ordering` is
given, the items in the list will be sorted before sending them to the
database.
"""
_type = list
def __init__(self, *args, **kwargs):
self.ordering = kwargs.pop('ordering', None)
if self.ordering is not None and not callable(self.ordering):
raise TypeError("'ordering' has to be a callable or None, "
"not of type %r" % type(self.ordering))
super(ListField, self).__init__(*args, **kwargs)
def _convert(self, func, values, *args, **kwargs):
values = super(ListField, self)._convert(func, values, *args, **kwargs)
if values is not None and self.ordering is not None:
values.sort(key=self.ordering)
return values
class SetField(AbstractIterableField):
"""
Field representing a Python ``set``.
"""
_type = set
class DictField(AbstractIterableField):
"""
Field representing a Python ``dict``.
The field type conversions described in :class:`AbstractIterableField`
only affect values of the dictionary, not keys.
Depending on the backend, keys that aren't strings might not be allowed.
"""
_type = dict
def _convert(self, func, values, *args, **kwargs):
if values is None:
return None
return dict((key, func(value, *args, **kwargs))
for key, value in values.iteritems())
def validate(self, values, model_instance):
if not isinstance(values, dict):
raise ValidationError('Value is of type %r. Should be a dict.' % type(values))
class BlobField(models.Field):
"""
A field for storing blobs of binary data.
The value might either be a string (or something that can be converted to
a string), or a file-like object.
In the latter case, the object has to provide a ``read`` method from which
the blob is read.
"""
def get_internal_type(self):
return 'BlobField'
def formfield(self, **kwargs):
# A file widget is provided, but use model FileField or ImageField
# for storing specific files most of the time
from .widgets import BlobWidget
from django.forms import FileField
defaults = {'form_class': FileField, 'widget': BlobWidget}
defaults.update(kwargs)
return super(BlobField, self).formfield(**defaults)
def get_db_prep_value(self, value, connection, prepared=False):
if hasattr(value, 'read'):
return value.read()
else:
return str(value)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
raise TypeError("BlobFields do not support lookups")
def value_to_string(self, obj):
return str(self._get_val_from_obj(obj))
class EmbeddedModelField(models.Field):
"""
Field that allows you to embed a model instance.
:param model: (optional) The model class that shall be embedded
(may also be passed as string similar to relation fields)
"""
__metaclass__ = models.SubfieldBase
def __init__(self, embedded_model=None, *args, **kwargs):
self.embedded_model = embedded_model
kwargs.setdefault('default', None)
super(EmbeddedModelField, self).__init__(*args, **kwargs)
def db_type(self, connection):
return 'DictField:RawField'
def _set_model(self, model):
# EmbeddedModelFields are not contribute[d]_to_class if using within
# ListFields (and friends), so we can only know the model field is
# used in when the IterableField sets our 'model' attribute in its
# contribute_to_class method.
# We need to know the model to generate a valid key for the lookup.
if model is not None and isinstance(self.embedded_model, basestring):
# The model argument passed to __init__ was a string, so we need
# to make sure to resolve that string to the corresponding model
# class, similar to relation fields. We abuse some of the
# relation fields' code to do the lookup here:
def _resolve_lookup(self_, resolved_model, model):
self.embedded_model = resolved_model
from django.db.models.fields.related import add_lazy_relation
add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)
self._model = model
    model = property(lambda self: self._model, _set_model)
def pre_save(self, model_instance, add):
embedded_instance = super(EmbeddedModelField, self).pre_save(model_instance, add)
if embedded_instance is None:
return None, None
if self.embedded_model is not None and \
not isinstance(embedded_instance, self.embedded_model):
raise TypeError("Expected instance of type %r, not %r"
% (type(self.embedded_model), type(embedded_instance)))
data = dict((field.name, field.pre_save(embedded_instance, add))
for field in embedded_instance._meta.fields)
return embedded_instance, data
def get_db_prep_value(self, (embedded_instance, embedded_dict), **kwargs):
if embedded_dict is None:
return None
values = {}
for name, value in embedded_dict.iteritems():
field = embedded_instance._meta.get_field(name)
values[field.column] = field.get_db_prep_value(value, **kwargs)
if self.embedded_model is None:
values.update({'_module' : embedded_instance.__class__.__module__,
'_model' : embedded_instance.__class__.__name__})
return values
# TODO/XXX: Remove this once we have a cleaner solution
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if hasattr(value, 'as_lookup_value'):
value = value.as_lookup_value(self, lookup_type, connection)
return value
def to_python(self, values):
if not isinstance(values, dict):
return values
module, model = values.pop('_module', None), values.pop('_model', None)
if module is not None:
return getattr(import_module(module), model)(**values)
return self.embedded_model(**values)
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from python_pachyderm.proto.v2.identity import identity_pb2 as python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2
class APIStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SetIdentityServerConfig = channel.unary_unary(
'/identity_v2.API/SetIdentityServerConfig',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.SetIdentityServerConfigRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.SetIdentityServerConfigResponse.FromString,
)
self.GetIdentityServerConfig = channel.unary_unary(
'/identity_v2.API/GetIdentityServerConfig',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIdentityServerConfigRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIdentityServerConfigResponse.FromString,
)
self.CreateIDPConnector = channel.unary_unary(
'/identity_v2.API/CreateIDPConnector',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateIDPConnectorRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateIDPConnectorResponse.FromString,
)
self.UpdateIDPConnector = channel.unary_unary(
'/identity_v2.API/UpdateIDPConnector',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateIDPConnectorRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateIDPConnectorResponse.FromString,
)
self.ListIDPConnectors = channel.unary_unary(
'/identity_v2.API/ListIDPConnectors',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListIDPConnectorsRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListIDPConnectorsResponse.FromString,
)
self.GetIDPConnector = channel.unary_unary(
'/identity_v2.API/GetIDPConnector',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIDPConnectorRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIDPConnectorResponse.FromString,
)
self.DeleteIDPConnector = channel.unary_unary(
'/identity_v2.API/DeleteIDPConnector',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteIDPConnectorRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteIDPConnectorResponse.FromString,
)
self.CreateOIDCClient = channel.unary_unary(
'/identity_v2.API/CreateOIDCClient',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateOIDCClientRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateOIDCClientResponse.FromString,
)
self.UpdateOIDCClient = channel.unary_unary(
'/identity_v2.API/UpdateOIDCClient',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateOIDCClientRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateOIDCClientResponse.FromString,
)
self.GetOIDCClient = channel.unary_unary(
'/identity_v2.API/GetOIDCClient',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetOIDCClientRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetOIDCClientResponse.FromString,
)
self.ListOIDCClients = channel.unary_unary(
'/identity_v2.API/ListOIDCClients',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListOIDCClientsRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListOIDCClientsResponse.FromString,
)
self.DeleteOIDCClient = channel.unary_unary(
'/identity_v2.API/DeleteOIDCClient',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteOIDCClientRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteOIDCClientResponse.FromString,
)
self.DeleteAll = channel.unary_unary(
'/identity_v2.API/DeleteAll',
request_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteAllRequest.SerializeToString,
response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteAllResponse.FromString,
)
class APIServicer(object):
"""Missing associated documentation comment in .proto file."""
def SetIdentityServerConfig(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIdentityServerConfig(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateIDPConnector(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateIDPConnector(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListIDPConnectors(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIDPConnector(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteIDPConnector(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateOIDCClient(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateOIDCClient(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOIDCClient(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOIDCClients(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteOIDCClient(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteAll(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_APIServicer_to_server(servicer, server):
rpc_method_handlers = {
'SetIdentityServerConfig': grpc.unary_unary_rpc_method_handler(
servicer.SetIdentityServerConfig,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.SetIdentityServerConfigRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.SetIdentityServerConfigResponse.SerializeToString,
),
'GetIdentityServerConfig': grpc.unary_unary_rpc_method_handler(
servicer.GetIdentityServerConfig,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIdentityServerConfigRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIdentityServerConfigResponse.SerializeToString,
),
'CreateIDPConnector': grpc.unary_unary_rpc_method_handler(
servicer.CreateIDPConnector,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateIDPConnectorRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateIDPConnectorResponse.SerializeToString,
),
'UpdateIDPConnector': grpc.unary_unary_rpc_method_handler(
servicer.UpdateIDPConnector,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateIDPConnectorRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateIDPConnectorResponse.SerializeToString,
),
'ListIDPConnectors': grpc.unary_unary_rpc_method_handler(
servicer.ListIDPConnectors,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListIDPConnectorsRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListIDPConnectorsResponse.SerializeToString,
),
'GetIDPConnector': grpc.unary_unary_rpc_method_handler(
servicer.GetIDPConnector,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIDPConnectorRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIDPConnectorResponse.SerializeToString,
),
'DeleteIDPConnector': grpc.unary_unary_rpc_method_handler(
servicer.DeleteIDPConnector,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteIDPConnectorRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteIDPConnectorResponse.SerializeToString,
),
'CreateOIDCClient': grpc.unary_unary_rpc_method_handler(
servicer.CreateOIDCClient,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateOIDCClientRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateOIDCClientResponse.SerializeToString,
),
'UpdateOIDCClient': grpc.unary_unary_rpc_method_handler(
servicer.UpdateOIDCClient,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateOIDCClientRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateOIDCClientResponse.SerializeToString,
),
'GetOIDCClient': grpc.unary_unary_rpc_method_handler(
servicer.GetOIDCClient,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetOIDCClientRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetOIDCClientResponse.SerializeToString,
),
'ListOIDCClients': grpc.unary_unary_rpc_method_handler(
servicer.ListOIDCClients,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListOIDCClientsRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListOIDCClientsResponse.SerializeToString,
),
'DeleteOIDCClient': grpc.unary_unary_rpc_method_handler(
servicer.DeleteOIDCClient,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteOIDCClientRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteOIDCClientResponse.SerializeToString,
),
'DeleteAll': grpc.unary_unary_rpc_method_handler(
servicer.DeleteAll,
request_deserializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteAllRequest.FromString,
response_serializer=python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteAllResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'identity_v2.API', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class API(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def SetIdentityServerConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/SetIdentityServerConfig',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.SetIdentityServerConfigRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.SetIdentityServerConfigResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIdentityServerConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/GetIdentityServerConfig',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIdentityServerConfigRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIdentityServerConfigResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateIDPConnector(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/CreateIDPConnector',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateIDPConnectorRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateIDPConnectorResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateIDPConnector(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/UpdateIDPConnector',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateIDPConnectorRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateIDPConnectorResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListIDPConnectors(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/ListIDPConnectors',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListIDPConnectorsRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListIDPConnectorsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIDPConnector(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/GetIDPConnector',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIDPConnectorRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIDPConnectorResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteIDPConnector(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/DeleteIDPConnector',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteIDPConnectorRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteIDPConnectorResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateOIDCClient(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/CreateOIDCClient',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateOIDCClientRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.CreateOIDCClientResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateOIDCClient(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/UpdateOIDCClient',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateOIDCClientRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.UpdateOIDCClientResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetOIDCClient(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/GetOIDCClient',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetOIDCClientRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetOIDCClientResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOIDCClients(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/ListOIDCClients',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListOIDCClientsRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.ListOIDCClientsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteOIDCClient(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/DeleteOIDCClient',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteOIDCClientRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteOIDCClientResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteAll(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/identity_v2.API/DeleteAll',
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteAllRequest.SerializeToString,
python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.DeleteAllResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
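# Illustrative usage sketch (not part of the generated code; the address is
# an assumption):
#
#     channel = grpc.insecure_channel('localhost:1650')
#     stub = APIStub(channel)
#     resp = stub.GetIdentityServerConfig(
#         python__pachyderm_dot_proto_dot_v2_dot_identity_dot_identity__pb2.GetIdentityServerConfigRequest())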
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import random
from frappe.model.document import Document
class DesktopIcon(Document):
def validate(self):
if not self.label:
self.label = self.module_name
def on_trash(self):
clear_desktop_icons_cache()
def after_doctype_insert():
frappe.db.add_unique('Desktop Icon', ('module_name', 'owner', 'standard'))
def get_desktop_icons(user=None):
'''Return desktop icons for user'''
if not user:
user = frappe.session.user
user_icons = frappe.cache().hget('desktop_icons', user)
if not user_icons:
fields = ['module_name', 'hidden', 'label', 'link', 'type', 'icon', 'color',
'_doctype', 'idx', 'force_show', 'reverse', 'custom', 'standard', 'blocked']
standard_icons = frappe.db.get_all('Desktop Icon',
fields=fields, filters={'standard': 1})
standard_map = {}
for icon in standard_icons:
standard_map[icon.module_name] = icon
user_icons = frappe.db.get_all('Desktop Icon', fields=fields,
filters={'standard': 0, 'owner': user})
# update hidden property
for icon in user_icons:
standard_icon = standard_map.get(icon.module_name, None)
# override properties from standard icon
if standard_icon:
for key in ('route', 'label', 'color', 'icon', 'link'):
if standard_icon.get(key):
icon[key] = standard_icon.get(key)
if standard_icon.blocked:
icon.hidden = 1
# flag for modules_setup page
icon.hidden_in_standard = 1
elif standard_icon.force_show:
icon.hidden = 0
# add missing standard icons (added via new install apps?)
user_icon_names = [icon.module_name for icon in user_icons]
for standard_icon in standard_icons:
if standard_icon.module_name not in user_icon_names:
# flag for modules_setup page
standard_icon.hidden_in_standard = standard_icon.hidden
# if blocked, hidden too!
if standard_icon.blocked:
standard_icon.hidden = 1
user_icons.append(standard_icon)
user_blocked_modules = frappe.get_doc('User', user).get_blocked_modules()
for icon in user_icons:
if icon.module_name in user_blocked_modules:
icon.hidden = 1
# sort by idx
	user_icons.sort(key=lambda icon: icon.idx)
frappe.cache().hset('desktop_icons', user, user_icons)
return user_icons
@frappe.whitelist()
def add_user_icon(_doctype, label=None, link=None, type='link', standard=0):
'''Add a new user desktop icon to the desktop'''
if not label: label = frappe._(_doctype)
if not link: link = 'List/{0}'.format(_doctype)
icon_name = frappe.db.exists('Desktop Icon', {'standard': standard, 'link': link,
'owner': frappe.session.user})
if icon_name and frappe.db.get_value('Desktop Icon', icon_name, 'hidden'):
frappe.db.set_value('Desktop Icon', icon_name, 'hidden', 0)
clear_desktop_icons_cache()
elif not icon_name:
idx = frappe.db.sql('select max(idx) from `tabDesktop Icon` where owner=%s',
frappe.session.user)[0][0] or \
frappe.db.sql('select count(*) from `tabDesktop Icon` where standard=1')[0][0]
module = frappe.db.get_value('DocType', _doctype, 'module')
module_icon = frappe.get_value('Desktop Icon', {'standard':1, 'module_name':module},
['name', 'icon', 'color', 'reverse'], as_dict=True)
if not module_icon:
module_icon = frappe._dict()
opts = random.choice(palette)
module_icon.color = opts[0]
module_icon.reverse = 0 if (len(opts) > 1) else 1
		new_icon = frappe.get_doc({
			'doctype': 'Desktop Icon',
			'label': label,
			'module_name': _doctype,
			'link': link,
			'type': type,
			'_doctype': _doctype,
			'icon': module_icon.icon,
			'color': module_icon.color,
			'reverse': module_icon.reverse,
			'idx': idx + 1,
			'custom': 1,
			'standard': standard
		}).insert(ignore_permissions=True)
		clear_desktop_icons_cache()
		return new_icon.name
else:
return icon_name
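# Sketch: add_user_icon('ToDo') creates (or unhides) a list icon for the
# current session user; 'ToDo' is just an example doctype.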
@frappe.whitelist()
def set_order(new_order, user=None):
'''set new order by duplicating user icons (if user is set) or set global order'''
if isinstance(new_order, basestring):
new_order = json.loads(new_order)
for i, module_name in enumerate(new_order):
if module_name not in ('Explore',):
if user:
icon = get_user_copy(module_name, user)
else:
name = frappe.db.get_value('Desktop Icon',
{'standard': 1, 'module_name': module_name})
if name:
icon = frappe.get_doc('Desktop Icon', name)
else:
# standard icon missing, create one for DocType
name = add_user_icon(module_name, standard=1)
icon = frappe.get_doc('Desktop Icon', name)
icon.db_set('idx', i)
clear_desktop_icons_cache()
def set_desktop_icons(visible_list):
'''Resets all lists and makes only the given one standard,
if the desktop icon does not exist and the name is a DocType, then will create
an icon for the doctype'''
# clear all custom
frappe.db.sql('delete from `tabDesktop Icon` where standard=0')
# set all as blocked
frappe.db.sql('update `tabDesktop Icon` set blocked=0, hidden=1')
# set as visible if present, or add icon
for module_name in visible_list:
name = frappe.db.get_value('Desktop Icon', {'module_name': module_name})
if name:
frappe.db.set_value('Desktop Icon', name, 'hidden', 0)
else:
if frappe.db.exists('DocType', module_name):
icon_name = add_user_icon(module_name)
# make it standard
frappe.db.set_value('Desktop Icon', icon_name, 'standard', 1)
# set the order
set_order(visible_list)
clear_desktop_icons_cache()
def set_hidden_list(hidden_list, user=None):
'''Sets property `hidden`=1 in **Desktop Icon** for given user.
If user is None then it will set global values.
It will also set the rest of the icons as shown (`hidden` = 0)'''
if isinstance(hidden_list, basestring):
hidden_list = json.loads(hidden_list)
# set as hidden
for module_name in hidden_list:
set_hidden(module_name, user, 1)
# set as seen
for module_name in list(set(get_all_icons()) - set(hidden_list)):
set_hidden(module_name, user, 0)
if user:
clear_desktop_icons_cache()
else:
frappe.clear_cache()
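# Sketch (module names are examples): set_hidden_list('["Accounts", "Stock"]',
# user='test@example.com') hides those modules for that user and unhides the
# rest; with user=None the same call blocks them globally.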
def set_hidden(module_name, user=None, hidden=1):
'''Set module hidden property for given user. If user is not specified,
hide/unhide it globally'''
if user:
icon = get_user_copy(module_name, user)
if hidden and icon.custom:
frappe.delete_doc(icon.doctype, icon.name, ignore_permissions=True)
return
# hidden by user
icon.db_set('hidden', hidden)
else:
icon = frappe.get_doc('Desktop Icon', {'standard': 1, 'module_name': module_name})
# blocked is globally hidden
icon.db_set('blocked', hidden)
def get_all_icons():
return [d.module_name for d in frappe.get_all('Desktop Icon',
filters={'standard': 1}, fields=['module_name'])]
def clear_desktop_icons_cache(user=None):
frappe.cache().hdel('desktop_icons', user or frappe.session.user)
frappe.cache().hdel('bootinfo', user or frappe.session.user)
def get_user_copy(module_name, user=None):
'''Return user copy (Desktop Icon) of the given module_name. If user copy does not exist, create one.
:param module_name: Name of the module
:param user: User for which the copy is required (optional)
'''
if not user:
user = frappe.session.user
desktop_icon_name = frappe.db.get_value('Desktop Icon',
{'module_name': module_name, 'owner': user, 'standard': 0})
if desktop_icon_name:
return frappe.get_doc('Desktop Icon', desktop_icon_name)
else:
return make_user_copy(module_name, user)
def make_user_copy(module_name, user):
'''Insert and return the user copy of a standard Desktop Icon'''
standard_name = frappe.db.get_value('Desktop Icon', {'module_name': module_name, 'standard': 1})
if not standard_name:
frappe.throw('{0} not found'.format(module_name), frappe.DoesNotExistError)
original = frappe.get_doc('Desktop Icon', standard_name)
desktop_icon = frappe.get_doc({
'doctype': 'Desktop Icon',
'standard': 0,
'owner': user,
'module_name': module_name
})
for key in ('app', 'label', 'route', 'type', '_doctype', 'idx', 'reverse', 'force_show'):
if original.get(key):
desktop_icon.set(key, original.get(key))
desktop_icon.insert(ignore_permissions=True)
return desktop_icon
def sync_desktop_icons():
'''Sync desktop icons from all apps'''
for app in frappe.get_installed_apps():
sync_from_app(app)
def sync_from_app(app):
'''Sync desktop icons from app. To be called during install'''
try:
modules = frappe.get_attr(app + '.config.desktop.get_data')() or {}
except ImportError:
return []
if isinstance(modules, dict):
modules_list = []
for m, desktop_icon in modules.iteritems():
desktop_icon['module_name'] = m
modules_list.append(desktop_icon)
else:
modules_list = modules
for i, m in enumerate(modules_list):
desktop_icon_name = frappe.db.get_value('Desktop Icon',
{'module_name': m['module_name'], 'app': app, 'standard': 1})
if desktop_icon_name:
desktop_icon = frappe.get_doc('Desktop Icon', desktop_icon_name)
else:
# new icon
desktop_icon = frappe.get_doc({
'doctype': 'Desktop Icon',
'idx': i,
'standard': 1,
'app': app,
'owner': 'Administrator'
})
if 'doctype' in m:
m['_doctype'] = m.pop('doctype')
desktop_icon.update(m)
desktop_icon.save()
return modules_list
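# Icon color palette; a second tuple element appears to flag colors that need
# the "reverse" (light-on-dark) style.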
palette = (
('#FFC4C4',),
('#FFE8CD',),
('#FFD2C2',),
('#FF8989',),
('#FFD19C',),
('#FFA685',),
('#FF4D4D', 1),
('#FFB868',),
('#FF7846', 1),
('#A83333', 1),
('#A87945', 1),
('#A84F2E', 1),
('#D2D2FF',),
('#F8D4F8',),
('#DAC7FF',),
('#A3A3FF',),
('#F3AAF0',),
('#B592FF',),
('#7575FF', 1),
('#EC7DEA', 1),
('#8E58FF', 1),
('#4D4DA8', 1),
('#934F92', 1),
('#5E3AA8', 1),
('#EBF8CC',),
('#FFD7D7',),
('#D2F8ED',),
('#D9F399',),
('#FFB1B1',),
('#A4F3DD',),
('#C5EC63',),
('#FF8989', 1),
('#77ECCA',),
('#7B933D', 1),
('#A85B5B', 1),
('#49937E', 1),
('#FFFACD',),
('#D2F1FF',),
('#CEF6D1',),
('#FFF69C',),
('#A6E4FF',),
('#9DECA2',),
('#FFF168',),
('#78D6FF',),
('#6BE273',),
('#A89F45', 1),
('#4F8EA8', 1),
('#428B46', 1)
)
|
|
import numpy as np
from collections import OrderedDict
from copy import deepcopy
import os
import pycqed.analysis_v2.base_analysis as ba
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.analysis import measurement_analysis as ma_old
import pygsti
from pycqed.measurement.gate_set_tomography.pygsti_helpers import \
gst_exp_filepath, pygsti_expList_from_dataset
class GST_SingleQubit_DataExtraction(ba.BaseDataAnalysis):
"""
    Analysis class that extracts single-qubit GST shot data from a
    measurement file and repackages it as a pygsti dataset.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
close_figs: bool=True,
do_fitting: bool=True, auto=True,
ch_idx: int = 0,
gst_exp_list_filepath: str=None):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs, extract_only=extract_only,
do_fitting=do_fitting)
self.gst_exp_list_filepath = gst_exp_list_filepath
self.ch_idx = ch_idx
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
self.timestamps = a_tools.get_timestamps_in_range(
self.t_start, self.t_stop,
label=self.labels)
self.raw_data_dict['timestamps'] = self.timestamps
self.timestamp = self.timestamps[0]
a = ma_old.MeasurementAnalysis(
timestamp=self.timestamp, auto=False, close_file=False)
a.get_naming_and_values()
self.raw_data_dict['xvals'] = a.sweep_points
self.raw_data_dict['xlabel'] = a.parameter_names[0]
self.raw_data_dict['xunit'] = a.parameter_units[0]
self.raw_data_dict['bins'] = a.data_file['Experimental Data']['Experimental Metadata']['bins'].value
        if self.gst_exp_list_filepath is None:
gst_exp_list_filename = a.data_file['Experimental Data'][
'Experimental Metadata'].attrs['gst_exp_list_filename']
self.raw_data_dict['gst_exp_list_filepath'] = os.path.join(
gst_exp_filepath, gst_exp_list_filename)
else:
self.raw_data_dict['gst_exp_list_filepath'] = \
self.gst_exp_list_filepath
self.raw_data_dict['expList'] = pygsti_expList_from_dataset(
self.raw_data_dict['gst_exp_list_filepath'])
self.raw_data_dict['measured_values'] = a.measured_values
self.raw_data_dict['value_names'] = a.value_names
self.raw_data_dict['value_units'] = a.value_units
self.raw_data_dict['measurementstring'] = a.measurementstring
self.measurementstring = a.measurementstring
self.raw_data_dict['folder'] = a.folder
a.finish()
def process_data(self):
"""
        Reshapes the binned shot data into per-gatestring counts and writes
        them to a pygsti dataset textfile.
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
bins = self.proc_data_dict['bins']
self.proc_data_dict['binned_values'] = []
self.proc_data_dict['binned_values_stderr'] = []
expList = self.proc_data_dict['expList']
shots_1 = self.proc_data_dict['measured_values'][self.ch_idx]
print("Nr bins:", len(bins))
print("Nr gatestrings", len(expList))
# Filter out uncompleted iterations
missing_shots = (len(shots_1) % len(expList))
if missing_shots != 0:
shots_1 = shots_1[:-missing_shots]
shots_0 = 1 - shots_1
counts_1 = np.sum(shots_1.reshape(
(len(expList), -1),
order='F'), axis=1)
counts_0 = np.sum(shots_0.reshape(
(len(expList), -1),
order='F'), axis=1)
self.proc_data_dict['counts_0'] = counts_0
self.proc_data_dict['counts_1'] = counts_1
# writing to pygsti dataset
ds = pygsti.objects.DataSet(outcomeLabels=['0', '1'])
for i, gateString in enumerate(expList):
ds.add_count_dict(gateString,
{'0': counts_0[i],
'1': counts_1[i]})
ds.done_adding_data()
ds_name = self.measurementstring+self.timestamp+'_counts.txt'
dataset_fp = os.path.join(self.raw_data_dict['folder'], ds_name)
pygsti.io.write_dataset(dataset_fp, ds)
self.proc_data_dict['dataset'] = ds
self.proc_data_dict['dataset_fp'] = dataset_fp
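# Illustrative usage sketch (timestamp format, label and channel index are
# assumptions):
#
#     a = GST_SingleQubit_DataExtraction(t_start='20180101_120000',
#                                        label='GST', ch_idx=0)
#     dataset_fp = a.proc_data_dict['dataset_fp']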
class GST_TwoQubit_DataExtraction(ba.BaseDataAnalysis):
"""
    Analysis class that extracts two-qubit GST data from a measurement
    datafile and converts it into a pygsti dataset.
    """
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
close_figs: bool=True,
do_fitting: bool=True, auto=True,
ch_idx0: int = 0,
ch_idx1: int = 1,
gst_exp_list_filepath: str=None):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs, extract_only=extract_only,
do_fitting=do_fitting)
self.gst_exp_list_filepath = gst_exp_list_filepath
self.ch_idx0 = ch_idx0
self.ch_idx1 = ch_idx1
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
self.timestamps = a_tools.get_timestamps_in_range(
self.t_start, self.t_stop,
label=self.labels)
self.raw_data_dict['timestamps'] = self.timestamps
self.timestamp = self.timestamps[0]
a = ma_old.MeasurementAnalysis(
timestamp=self.timestamp, auto=False, close_file=False)
a.get_naming_and_values()
self.raw_data_dict['xvals'] = a.sweep_points
self.raw_data_dict['xlabel'] = a.parameter_names[0]
self.raw_data_dict['xunit'] = a.parameter_units[0]
        self.raw_data_dict['bins'] = a.data_file['Experimental Data'][
            'Experimental Metadata']['bins'].value
        if self.gst_exp_list_filepath is None:
gst_exp_list_filename = a.data_file['Experimental Data'][
'Experimental Metadata'].attrs['gst_exp_list_filename']
self.raw_data_dict['gst_exp_list_filepath'] = os.path.join(
gst_exp_filepath, gst_exp_list_filename)
else:
self.raw_data_dict['gst_exp_list_filepath'] = \
self.gst_exp_list_filepath
self.raw_data_dict['expList'] = pygsti_expList_from_dataset(
self.raw_data_dict['gst_exp_list_filepath'])
self.raw_data_dict['measured_values'] = a.measured_values
self.raw_data_dict['value_names'] = a.value_names
self.raw_data_dict['value_units'] = a.value_units
self.raw_data_dict['measurementstring'] = a.measurementstring
self.measurementstring = a.measurementstring
self.raw_data_dict['folder'] = a.folder
a.finish()
def process_data(self):
"""
Involves reshaping the data and writing it to a dataset textfile
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
bins = self.proc_data_dict['bins']
self.proc_data_dict['binned_values'] = []
self.proc_data_dict['binned_values_stderr'] = []
expList = self.proc_data_dict['expList']
shots_q0 = self.proc_data_dict['measured_values'][self.ch_idx0]
shots_q1 = self.proc_data_dict['measured_values'][self.ch_idx1]
print("Nr bins:", len(bins))
print("Nr gatestrings", len(expList))
# Filter out uncompleted iterations
        missing_shots = len(shots_q0) % len(expList)
        if missing_shots != 0:
            shots_q0 = shots_q0[:-missing_shots]
            shots_q1 = shots_q1[:-missing_shots]
# LSQ (q0) is last entry in list
shots_00 = (1-shots_q1) * (1-shots_q0)
shots_01 = (1-shots_q1) * (shots_q0)
shots_10 = (shots_q1) * (1-shots_q0)
shots_11 = (shots_q1) * (shots_q0)
counts_00 = np.sum(np.reshape(shots_00, (len(expList),
len(shots_00)//len(expList)), order='F'), axis=1)
counts_01 = np.sum(np.reshape(shots_01, (len(expList),
len(shots_01)//len(expList)), order='F'), axis=1)
counts_10 = np.sum(np.reshape(shots_10, (len(expList),
len(shots_10)//len(expList)), order='F'), axis=1)
counts_11 = np.sum(np.reshape(shots_11, (len(expList),
len(shots_11)//len(expList)), order='F'), axis=1)
# writing to pygsti dataset
outcomeLabels = [('00',), ('01',), ('10',), ('11',)]
ds = pygsti.objects.DataSet(outcomeLabels=outcomeLabels)
for i, gateString in enumerate(expList):
ds.add_count_dict(gateString,
{'00': counts_00[i], '01': counts_01[i],
'10': counts_10[i], '11': counts_11[i]})
ds.done_adding_data()
ds_name = self.measurementstring+self.timestamp+'_counts.txt'
dataset_fp = os.path.join(self.raw_data_dict['folder'], ds_name)
pygsti.io.write_dataset(dataset_fp, ds)
self.proc_data_dict['dataset'] = ds
self.proc_data_dict['dataset_fp'] = dataset_fp
|
|
from vtk.vtkCommonDataModel import vtkDataObject
from vtk.vtkCommonExecutionModel import vtkAlgorithm
from vtk.vtkCommonExecutionModel import vtkDemandDrivenPipeline
from vtk.vtkCommonExecutionModel import vtkStreamingDemandDrivenPipeline
from vtk.vtkFiltersPython import vtkPythonAlgorithm
class VTKAlgorithm(object):
"""This is a superclass which can be derived to implement
Python classes that work with vtkPythonAlgorithm. It implements
Initialize(), ProcessRequest(), FillInputPortInformation() and
FillOutputPortInformation().
Initialize() sets the input and output ports based on data
members.
ProcessRequest() calls RequestXXX() methods to implement
various pipeline passes.
FillInputPortInformation() and FillOutputPortInformation() set
the input and output types based on data members.
"""
def __init__(self, nInputPorts=1, inputType='vtkDataSet',
nOutputPorts=1, outputType='vtkPolyData'):
"""Sets up default NumberOfInputPorts, NumberOfOutputPorts,
InputType and OutputType that are used by various initialization
methods."""
self.NumberOfInputPorts = nInputPorts
self.NumberOfOutputPorts = nOutputPorts
self.InputType = inputType
self.OutputType = outputType
def Initialize(self, vtkself):
"""Sets up number of input and output ports based on
NumberOfInputPorts and NumberOfOutputPorts."""
vtkself.SetNumberOfInputPorts(self.NumberOfInputPorts)
vtkself.SetNumberOfOutputPorts(self.NumberOfOutputPorts)
def GetInputData(self, inInfo, i, j):
"""Convenience method that returns an input data object
given a vector of information objects and two indices."""
return inInfo[i].GetInformationObject(j).Get(vtkDataObject.DATA_OBJECT())
def GetOutputData(self, outInfo, i):
"""Convenience method that returns an output data object
given an information object and an index."""
return outInfo.GetInformationObject(i).Get(vtkDataObject.DATA_OBJECT())
def RequestDataObject(self, vtkself, request, inInfo, outInfo):
"""Overwritten by subclass to manage data object creation.
There is not need to overwrite this class if the output can
be created based on the OutputType data member."""
return 1
def RequestInformation(self, vtkself, request, inInfo, outInfo):
"""Overwritten by subclass to provide meta-data to downstream
pipeline."""
return 1
def RequestUpdateExtent(self, vtkself, request, inInfo, outInfo):
"""Overwritten by subclass to modify data request going
to upstream pipeline."""
return 1
def RequestData(self, vtkself, request, inInfo, outInfo):
"""Overwritten by subclass to execute the algorithm."""
raise NotImplementedError('RequestData must be implemented')
def ProcessRequest(self, vtkself, request, inInfo, outInfo):
"""Splits a request to RequestXXX() methods."""
if request.Has(vtkDemandDrivenPipeline.REQUEST_DATA_OBJECT()):
return self.RequestDataObject(vtkself, request, inInfo, outInfo)
elif request.Has(vtkDemandDrivenPipeline.REQUEST_INFORMATION()):
return self.RequestInformation(vtkself, request, inInfo, outInfo)
elif request.Has(vtkStreamingDemandDrivenPipeline.REQUEST_UPDATE_EXTENT()):
return self.RequestUpdateExtent(vtkself, request, inInfo, outInfo)
elif request.Has(vtkDemandDrivenPipeline.REQUEST_DATA()):
return self.RequestData(vtkself, request, inInfo, outInfo)
return 1
def FillInputPortInformation(self, vtkself, port, info):
"""Sets the required input type to InputType."""
info.Set(vtkAlgorithm.INPUT_REQUIRED_DATA_TYPE(), self.InputType)
return 1
def FillOutputPortInformation(self, vtkself, port, info):
"""Sets the default output type to OutputType."""
info.Set(vtkDataObject.DATA_TYPE_NAME(), self.OutputType)
return 1
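def _example_pass_through():
    """Illustrative sketch (this helper is not part of the original module):
    attach a VTKAlgorithm subclass to a vtkPythonAlgorithm. The executive then
    drives the RequestXXX() passes through ProcessRequest(). 'PassThrough' is
    a made-up name for demonstration."""
    class PassThrough(VTKAlgorithm):
        def RequestData(self, vtkself, request, inInfo, outInfo):
            # Shallow-copy the first input to the first output unchanged.
            inp = self.GetInputData(inInfo, 0, 0)
            out = self.GetOutputData(outInfo, 0)
            out.ShallowCopy(inp)
            return 1
    alg = vtkPythonAlgorithm()
    alg.SetPythonObject(PassThrough(inputType='vtkPolyData',
                                    outputType='vtkPolyData'))
    return alg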
class VTKPythonAlgorithmBase(vtkPythonAlgorithm):
"""This is a superclass which can be derived to implement
Python classes that act as VTK algorithms in a VTK pipeline.
It implements ProcessRequest(), FillInputPortInformation() and
FillOutputPortInformation().
ProcessRequest() calls RequestXXX() methods to implement
various pipeline passes.
FillInputPortInformation() and FillOutputPortInformation() set
the input and output types based on data members.
Common use is something like this:
class HDF5Source(VTKPythonAlgorithmBase):
def __init__(self):
VTKPythonAlgorithmBase.__init__(self,
nInputPorts=0,
nOutputPorts=1, outputType='vtkImageData')
def RequestInformation(self, request, inInfo, outInfo):
f = h5py.File("foo.h5", 'r')
dims = f['RTData'].shape[::-1]
info = outInfo.GetInformationObject(0)
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(),
(0, dims[0]-1, 0, dims[1]-1, 0, dims[2]-1), 6)
return 1
def RequestData(self, request, inInfo, outInfo):
f = h5py.File("foo.h5", 'r')
data = f['RTData'][:]
output = dsa.WrapDataObject(vtk.vtkImageData.GetData(outInfo))
output.SetDimensions(data.shape)
output.PointData.append(data.flatten(), 'RTData')
output.PointData.SetActiveScalars('RTData')
return 1
alg = HDF5Source()
cf = vtk.vtkContourFilter()
cf.SetInputConnection(alg.GetOutputPort())
cf.Update()
"""
class InternalAlgorithm(object):
"Internal class. Do not use."
def Initialize(self, vtkself):
pass
def FillInputPortInformation(self, vtkself, port, info):
return vtkself.FillInputPortInformation(port, info)
def FillOutputPortInformation(self, vtkself, port, info):
return vtkself.FillOutputPortInformation(port, info)
def ProcessRequest(self, vtkself, request, inInfo, outInfo):
return vtkself.ProcessRequest(request, inInfo, outInfo)
def __init__(self, nInputPorts=1, inputType='vtkDataSet',
nOutputPorts=1, outputType='vtkPolyData'):
"""Sets up default NumberOfInputPorts, NumberOfOutputPorts,
InputType and OutputType that are used by various methods.
Make sure to call this method from any subclass' __init__"""
self.SetPythonObject(VTKPythonAlgorithmBase.InternalAlgorithm())
self.SetNumberOfInputPorts(nInputPorts)
self.SetNumberOfOutputPorts(nOutputPorts)
self.InputType = inputType
self.OutputType = outputType
def GetInputData(self, inInfo, i, j):
"""Convenience method that returns an input data object
given a vector of information objects and two indices."""
return inInfo[i].GetInformationObject(j).Get(vtkDataObject.DATA_OBJECT())
def GetOutputData(self, outInfo, i):
"""Convenience method that returns an output data object
given an information object and an index."""
return outInfo.GetInformationObject(i).Get(vtkDataObject.DATA_OBJECT())
def FillInputPortInformation(self, port, info):
"""Sets the required input type to InputType."""
info.Set(vtkAlgorithm.INPUT_REQUIRED_DATA_TYPE(), self.InputType)
return 1
def FillOutputPortInformation(self, port, info):
"""Sets the default output type to OutputType."""
info.Set(vtkDataObject.DATA_TYPE_NAME(), self.OutputType)
return 1
def ProcessRequest(self, request, inInfo, outInfo):
"""Splits a request to RequestXXX() methods."""
if request.Has(vtkDemandDrivenPipeline.REQUEST_DATA_OBJECT()):
return self.RequestDataObject(request, inInfo, outInfo)
elif request.Has(vtkDemandDrivenPipeline.REQUEST_INFORMATION()):
return self.RequestInformation(request, inInfo, outInfo)
elif request.Has(vtkStreamingDemandDrivenPipeline.REQUEST_UPDATE_EXTENT()):
return self.RequestUpdateExtent(request, inInfo, outInfo)
elif request.Has(vtkDemandDrivenPipeline.REQUEST_DATA()):
return self.RequestData(request, inInfo, outInfo)
return 1
def RequestDataObject(self, request, inInfo, outInfo):
"""Overwritten by subclass to manage data object creation.
There is not need to overwrite this class if the output can
be created based on the OutputType data member."""
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Overwritten by subclass to provide meta-data to downstream
pipeline."""
return 1
def RequestUpdateExtent(self, request, inInfo, outInfo):
"""Overwritten by subclass to modify data request going
to upstream pipeline."""
return 1
    def RequestData(self, request, inInfo, outInfo):
"""Overwritten by subclass to execute the algorithm."""
raise NotImplementedError('RequestData must be implemented')
|
|
# -*- coding: utf-8 -*-
"""
collectr.models
---------------
This module contains the main models used by collectr.
:copyright: (c) 2013 Cory Benfield
:license: MIT License, see LICENSE for details.
"""
from .utils import (tree_walk, match_regexes, move_path, minified_filename,
get_extension, default_minifier, should_update_key)
from .exceptions import MinifierError
import re
import subprocess
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
from boto.s3.key import Key
class StaticDir(object):
"""
This class represents a single directory or directory tree of static files.
This class can be created by the user, or used internally in collectr.
:param directory: The root of the directory tree.
"""
def __init__(self, directory):
#: The root of the directory tree.
self.directory = directory
        #: A string representing the location of static files that have yet to
        #: be minified. Files in this directory will be minified, and the
        #: minified versions will be saved to `directory`.
self.input_directory = None
        #: The minifier to use on CSS and/or Javascript files. May be a
        #: dictionary whose keys correspond to file extensions, or a string.
        #: The string is considered a special case, and will be applied to
        #: both Javascript and CSS files.
        #: If the dictionary is used, each value is a command line to run.
        #: This should be a Python format string with two string variables,
        #: {in_name} and {out_name}, which refer to the input and output
        #: filename respectively. The string must be able to have .format()
        #: called on it.
self.minifier = default_minifier()
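        # Example (illustrative only; the exact minifier binaries are
        # assumptions, not collectr defaults):
        #   self.minifier = {'js': 'uglifyjs {in_name} -o {out_name}',
        #                    'css': 'cleancss -o {out_name} {in_name}'}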
#: Whether to update all files, regardless of whether they have been
#: changed.
self.force_update = False
#: Files to ignore. Should be a list of regular expressions. Everything
#: that matches any of these regular expressions will be totally
#: ignored. The regex will be applied to the relative path.
self.ignore = []
#: A dictionary of keys and values that correspond to the metadata that
#: should be applied to the files. This metadata will be applied to
#: _all_ the files found by this :class:`StaticDir <StaticDir>`.
self.metadata = {}
def update(self, bucket_name):
"""
Connect to S3 and update the bucket with the static files from the
directory.
:param bucket_name: The name of the S3 bucket to upload to.
"""
self.minify_files()
files = self.enumerate_files(self.directory)
conn = self.connect_s3()
self.upload_files(files, bucket_name, conn)
return
def enumerate_files(self, directory):
"""
Enumerate all the files beneath a directory. Walks into all
directories except for those created by version control.
:param directory: The root of the tree.
"""
files = tree_walk(directory)
# Ignore some files.
files = self.filter_files(files)
return files
def filter_files(self, files):
"""
Given a list of files, remove any that match any of a list of regular
expressions.
"""
tests = [re.compile(x) for x in self.ignore]
# Bail early if we don't have any regexes to match.
if tests:
# Remove files that are matched by regexes.
files = [name for name in files if not match_regexes(tests, name)]
return files
def minify_files(self):
"""
Takes all the files either in the main directory or the input directory
and minifies them. If the files came from the input directory, moves
them to the main directory.
"""
if self.input_directory:
files = self.enumerate_files(self.input_directory)
else:
files = self.enumerate_files(self.directory)
# Strings are a special case: apply that special case.
if isinstance(self.minifier, basestring):
minifier = {'css': self.minifier, 'js': self.minifier}
else:
minifier = self.minifier
# For each file, if its extension has a minifier associated with it,
# apply it.
for name in files:
try:
command = minifier[get_extension(name)]
command = command.format(in_name=name,
out_name=self.get_output_name(name))
rc = subprocess.call(command, shell=True)
if rc != 0:
raise MinifierError("Error occurred during minification.")
except KeyError:
# If there's no minifier command, don't touch it.
continue
return
def get_output_name(self, input_filename):
"""
When minifying a file, determine its output filename. This depends on
whether it's being copied to a new directory.
"""
if self.input_directory:
filename = move_path(self.input_directory,
self.directory,
input_filename)
filename = minified_filename(filename)
else:
filename = minified_filename(input_filename)
return filename
def connect_s3(self):
"""
Connect to S3. Returns the boto connection object.
"""
return S3Connection()
def upload_files(self, files, bucket_name, connection):
"""
Given a list of files, an Amazon S3 bucket and a connection, uploads
the files to the bucket. If the bucket doesn't exist, creates it.
"""
# First get the bucket. If it exists, great. If not, create it.
try:
bucket = connection.get_bucket(bucket_name)
except S3ResponseError:
bucket = connection.create_bucket(bucket_name)
# For each file, create an S3 key and upload the data. Then, set the
# metadata.
for path in files:
            key = self.find_or_create_key(path, bucket)
if self.force_update or should_update_key(key, path):
key.set_contents_from_filename(path)
                self.apply_metadata(key)
# Set the visibility to public-read.
key.set_acl('public-read')
# All done.
return
def apply_metadata(self, key):
"""
Apply any expected metadata to an S3 key. If the value is a dict, the
key is treated as a regular expression that must match the file path.
Otherwise, the key and value are applied to all keys.
"""
for metakey, metavalue in self.metadata.iteritems():
# If the value is a dict and the regex matches, apply all the key-
# value pairs.
if isinstance(metavalue, dict) and re.search(metakey, key.key):
for newkey, newvalue in metavalue.iteritems():
key.set_metadata(newkey, newvalue)
# Otherwise, always apply the key and value.
elif not isinstance(metavalue, dict):
key.set_metadata(metakey, metavalue)
return
def find_or_create_key(self, path, bucket):
"""
For a given file, checks whether it's in the S3 bucket. If it is,
returns the key object corresponding to it. If not, creates a new key
and returns it.
"""
name = self.key_name_from_path(path)
key = bucket.lookup(name)
if not key:
key = Key(bucket)
key.key = name
return key
def key_name_from_path(self, path):
"""
Get the name of an S3 key from the path on the filesystem.
"""
if self.directory[-1] != '/':
temp_directory = self.directory + '/'
else:
temp_directory = self.directory
return path.replace(temp_directory, '', 1)
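def _example_update():
    """Illustrative usage sketch (not part of the original module; the bucket
    name and paths are placeholders). Assumes boto can find AWS credentials in
    the environment, which is how connect_s3() authenticates."""
    static = StaticDir('static/')
    static.ignore = [r'\.DS_Store$']
    static.metadata = {'Cache-Control': 'max-age=3600'}
    static.update('my-static-bucket')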
|
|
#
# lascanopyPro.py
#
# (c) 2013, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# uses lascanopy.exe to generate forestry metrics
#
# LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM
# raster output: BIL/ASC/IMG/TIF/DTM/PNG/JPG
#
# for licensing see http://lastools.org/LICENSE.txt
#
import sys, os, arcgisscripting, subprocess
def check_output(command,console):
    if console:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lascanopy production ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to LAStools
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))
### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"
### check if path exists
if not os.path.exists(lastools_path):
gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lascanopy executable
lascanopy_path = lastools_path+"\\lascanopy.exe"
### check if executable exists
if not os.path.exists(lascanopy_path):
gp.AddMessage("Cannot find lascanopy.exe at " + lascanopy_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lascanopy_path + " ...")
### create the command string for lascanopy.exe
command = ['"'+lascanopy_path+'"']
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### counting up the arguments
c = 1
### add input LiDAR
wildcards = sys.argv[c+1].split()
for wildcard in wildcards:
command.append("-i")
command.append('"' + sys.argv[c] + "\\" + wildcard + '"')
c = c + 2
### maybe we should merge all files into one
if sys.argv[c] == "true":
command.append("-merged")
c = c + 1
### maybe use a user-defined step size
if sys.argv[c] != "20":
command.append("-step")
command.append(sys.argv[c].replace(",","."))
c = c + 1
### maybe use a user-defined height cutoff
if sys.argv[c].replace(",",".") != "1.37":
command.append("-height_cutoff")
command.append(sys.argv[c].replace(",","."))
c = c + 1
### maybe there are products requested
products = sys.argv[c].split(";")
for product in products:
if (product[0] == "'"):
subproducts = product[1:-1].split(" ")
command.append("-"+subproducts[0])
for subproduct in subproducts[1:]:
command.append(subproduct)
else:
command.append("-"+product)
c = c + 1
### maybe there are count rasters requested
counts = sys.argv[c].split()
if len(counts) > 1:
command.append("-c")
for count in counts:
command.append(count.replace(",","."))
c = c + 1
### maybe there are density rasters requested
densities = sys.argv[c].split()
if len(densities) > 1:
command.append("-d")
for density in densities:
command.append(density.replace(",","."))
c = c + 1
### should we use the bounding box
if sys.argv[c] == "true":
command.append("-use_bb")
c = c + 1
### should we use the original bounding box
if sys.argv[c] == "true":
command.append("-use_orig_bb")
c = c + 1
### should we use the tile bounding box
if sys.argv[c] == "true":
command.append("-use_tile_bb")
c = c + 1
### maybe an output format was selected
if sys.argv[c] != "#":
command.append("-o" + sys.argv[c])
c = c + 1
### maybe an output file name was selected
if sys.argv[c] != "#":
command.append("-o")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output directory was selected
if sys.argv[c] != "#":
command.append("-odir")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output appendix was selected
if sys.argv[c] != "#":
command.append("-odix")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe we should run on multiple cores
if sys.argv[c] != "1":
command.append("-cores")
command.append(sys.argv[c])
c = c + 1
### maybe there are additional input options
if sys.argv[c] != "#":
additional_options = sys.argv[c].split()
for option in additional_options:
command.append(option)
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lascanopy
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lascanopy failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lascanopy done.")
|
|
import numpy as np
class Node(object):
"""
Base class for nodes in the network.
Arguments:
`inbound_nodes`: A list of nodes with edges into this node.
"""
    def __init__(self, inbound_nodes=None):
        """
        Node's constructor (runs when the object is instantiated). Sets
        properties that all nodes need.
        """
        # Use None as the default to avoid sharing one mutable list
        # across every Node instance.
        if inbound_nodes is None:
            inbound_nodes = []
        # A list of nodes with edges into this node.
        self.inbound_nodes = inbound_nodes
# The eventual value of this node. Set by running
# the forward() method.
self.value = None
# A list of nodes that this node outputs to.
self.outbound_nodes = []
# New property! Keys are the inputs to this node and
# their values are the partials of this node with
# respect to that input.
self.gradients = {}
# Sets this node as an outbound node for all of
# this node's inputs.
for node in inbound_nodes:
node.outbound_nodes.append(self)
def forward(self):
"""
Every node that uses this class as a base class will
need to define its own `forward` method.
"""
raise NotImplementedError
def backward(self):
"""
Every node that uses this class as a base class will
need to define its own `backward` method.
"""
raise NotImplementedError
class Input(Node):
"""
A generic input into the network.
"""
def __init__(self):
# The base class constructor has to run to set all
# the properties here.
#
# The most important property on an Input is value.
# self.value is set during `topological_sort` later.
Node.__init__(self)
def forward(self):
# Do nothing because nothing is calculated.
pass
def backward(self):
# An Input node has no inputs so the gradient (derivative)
# is zero.
        # The key, `self`, is a reference to this object.
self.gradients = {self: 0}
# Weights and bias may be inputs, so you need to sum
# the gradient from output gradients.
for n in self.outbound_nodes:
self.gradients[self] += n.gradients[self]
class Linear(Node):
"""
Represents a node that performs a linear transform.
"""
def __init__(self, X, W, b):
# The base class (Node) constructor. Weights and bias
# are treated like inbound nodes.
Node.__init__(self, [X, W, b])
def forward(self):
"""
Performs the math behind a linear transform.
"""
X = self.inbound_nodes[0].value
W = self.inbound_nodes[1].value
b = self.inbound_nodes[2].value
self.value = np.dot(X, W) + b
def backward(self):
"""
Calculates the gradient based on the output values.
"""
# Initialize a partial for each of the inbound_nodes.
self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}
# Cycle through the outputs. The gradient will change depending
# on each output, so the gradients are summed over all outputs.
for n in self.outbound_nodes:
# Get the partial of the cost with respect to this node.
grad_cost = n.gradients[self]
# Set the partial of the loss with respect to this node's inputs.
self.gradients[self.inbound_nodes[0]] += np.dot(grad_cost, self.inbound_nodes[1].value.T)
# Set the partial of the loss with respect to this node's weights.
self.gradients[self.inbound_nodes[1]] += np.dot(self.inbound_nodes[0].value.T, grad_cost)
# Set the partial of the loss with respect to this node's bias.
self.gradients[self.inbound_nodes[2]] += np.sum(grad_cost, axis=0, keepdims=False)
class Sigmoid(Node):
"""
Represents a node that performs the sigmoid activation function.
"""
def __init__(self, node):
# The base class constructor.
Node.__init__(self, [node])
def _sigmoid(self, x):
"""
This method is separate from `forward` because it
will be used with `backward` as well.
`x`: A numpy array-like object.
"""
return 1. / (1. + np.exp(-x))
def forward(self):
"""
Perform the sigmoid function and set the value.
"""
input_value = self.inbound_nodes[0].value
self.value = self._sigmoid(input_value)
def backward(self):
"""
Calculates the gradient using the derivative of
the sigmoid function.
"""
# Initialize the gradients to 0.
self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}
# Sum the partial with respect to the input over all the outputs.
for n in self.outbound_nodes:
grad_cost = n.gradients[self]
sigmoid = self.value
self.gradients[self.inbound_nodes[0]] += sigmoid * (1 - sigmoid) * grad_cost
class MSE(Node):
def __init__(self, y, a):
"""
The mean squared error cost function.
Should be used as the last node for a network.
"""
# Call the base class' constructor.
Node.__init__(self, [y, a])
def forward(self):
"""
Calculates the mean squared error.
"""
# NOTE: We reshape these to avoid possible matrix/vector broadcast
# errors.
#
# For example, if we subtract an array of shape (3,) from an array of shape
# (3,1) we get an array of shape(3,3) as the result when we want
# an array of shape (3,1) instead.
#
        # Making both arrays (3,1) ensures the result is (3,1) and does
        # an elementwise subtraction as expected.
y = self.inbound_nodes[0].value.reshape(-1, 1)
a = self.inbound_nodes[1].value.reshape(-1, 1)
self.m = self.inbound_nodes[0].value.shape[0]
# Save the computed output for backward.
self.diff = y - a
self.value = np.mean(self.diff**2)
def backward(self):
"""
Calculates the gradient of the cost.
"""
self.gradients[self.inbound_nodes[0]] = (2 / self.m) * self.diff
self.gradients[self.inbound_nodes[1]] = (-2 / self.m) * self.diff
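def _demo_reshape_pitfall():
    """Illustrative sketch (this helper is not part of the original lesson
    code): why MSE.forward() reshapes to column vectors. Subtracting a (3,)
    array from a (3, 1) array broadcasts to (3, 3) instead of the intended
    elementwise (3, 1) result."""
    a = np.zeros((3, 1))
    b = np.ones(3)
    assert (a - b).shape == (3, 3)                  # surprising broadcast
    assert (a - b.reshape(-1, 1)).shape == (3, 1)   # intended elementwise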
def topological_sort(feed_dict):
"""
Sort the nodes in topological order using Kahn's Algorithm.
    `feed_dict`: A dictionary where the key is an `Input` node and the value
    is the respective value fed to that node.
Returns a list of sorted nodes.
"""
input_nodes = [n for n in feed_dict.keys()]
G = {}
nodes = [n for n in input_nodes]
while len(nodes) > 0:
n = nodes.pop(0)
if n not in G:
G[n] = {'in': set(), 'out': set()}
for m in n.outbound_nodes:
if m not in G:
G[m] = {'in': set(), 'out': set()}
G[n]['out'].add(m)
G[m]['in'].add(n)
nodes.append(m)
L = []
S = set(input_nodes)
while len(S) > 0:
n = S.pop()
if isinstance(n, Input):
n.value = feed_dict[n]
L.append(n)
for m in n.outbound_nodes:
G[n]['out'].remove(m)
G[m]['in'].remove(n)
# if no other incoming edges add to S
if len(G[m]['in']) == 0:
S.add(m)
return L
def forward_and_backward(graph):
"""
Performs a forward pass and a backward pass through a list of sorted Nodes.
Arguments:
`graph`: The result of calling `topological_sort`.
"""
# Forward pass
for n in graph:
n.forward()
# Backward pass
# see: https://docs.python.org/2.3/whatsnew/section-slices.html
for n in graph[::-1]:
n.backward()
def sgd_update(trainables, learning_rate=1e-2):
"""
Updates the value of each trainable with SGD.
Arguments:
`trainables`: A list of `Input` Nodes representing weights/biases.
`learning_rate`: The learning rate.
"""
    # Update each trainable in the direction of steepest descent. The
    # partial of the cost with respect to a trainable is stored in its
    # `gradients` dictionary, keyed by the trainable itself.
    for t in trainables:
        t.value -= learning_rate * t.gradients[t]
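if __name__ == '__main__':
    # Minimal end-to-end sketch (not part of the original lesson code): a
    # Linear -> Sigmoid -> MSE graph trained for one SGD step on random data.
    # All shapes and the learning rate are arbitrary choices.
    X, W, b, y = Input(), Input(), Input(), Input()
    f = Linear(X, W, b)
    g = Sigmoid(f)
    cost = MSE(y, g)
    feed_dict = {
        X: np.random.randn(4, 3),   # 4 examples, 3 features
        W: np.random.randn(3, 1),
        b: np.zeros(1),
        y: np.random.randn(4, 1),
    }
    graph = topological_sort(feed_dict)
    forward_and_backward(graph)
    sgd_update([W, b], learning_rate=0.1)
    print("cost after forward pass:", cost.value)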
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._key_vault_client_enums import *
class Action(msrest.serialization.Model):
"""The action that will be executed.
:param action_type: The type of the action. Possible values include: "EmailContacts",
"AutoRenew".
:type action_type: str or ~azure.keyvault.v7_3_preview.models.ActionType
"""
_attribute_map = {
'action_type': {'key': 'action_type', 'type': 'str'},
}
def __init__(
self,
*,
action_type: Optional[Union[str, "ActionType"]] = None,
**kwargs
):
super(Action, self).__init__(**kwargs)
self.action_type = action_type
class AdministratorDetails(msrest.serialization.Model):
"""Details of the organization administrator of the certificate issuer.
:param first_name: First name.
:type first_name: str
:param last_name: Last name.
:type last_name: str
:param email_address: Email address.
:type email_address: str
:param phone: Phone number.
:type phone: str
"""
_attribute_map = {
'first_name': {'key': 'first_name', 'type': 'str'},
'last_name': {'key': 'last_name', 'type': 'str'},
'email_address': {'key': 'email', 'type': 'str'},
'phone': {'key': 'phone', 'type': 'str'},
}
def __init__(
self,
*,
first_name: Optional[str] = None,
last_name: Optional[str] = None,
email_address: Optional[str] = None,
phone: Optional[str] = None,
**kwargs
):
super(AdministratorDetails, self).__init__(**kwargs)
self.first_name = first_name
self.last_name = last_name
self.email_address = email_address
self.phone = phone
class Attributes(msrest.serialization.Model):
"""The object attributes managed by the KeyVault service.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
not_before: Optional[datetime.datetime] = None,
expires: Optional[datetime.datetime] = None,
**kwargs
):
super(Attributes, self).__init__(**kwargs)
self.enabled = enabled
self.not_before = not_before
self.expires = expires
self.created = None
self.updated = None
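# Illustrative sketch (not part of the generated code): msrest models
# serialize through `_attribute_map`, so the Python attribute names map onto
# the wire names ('not_before' -> 'nbf', 'expires' -> 'exp'), and readonly
# ivars such as `created` are skipped when sending requests.
#
#   attrs = Attributes(enabled=True)
#   attrs.serialize()  # -> {'enabled': True}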
class BackupCertificateResult(msrest.serialization.Model):
"""The backup certificate result, containing the backup blob.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The backup blob containing the backed up certificate.
:vartype value: bytes
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
**kwargs
):
super(BackupCertificateResult, self).__init__(**kwargs)
self.value = None
class CertificateAttributes(Attributes):
"""The certificate management attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
    :ivar recoverable_days: Soft-delete data retention days. The value should be >=7 and <=90
     when soft-delete is enabled, otherwise 0.
    :vartype recoverable_days: int
:ivar recovery_level: Reflects the deletion recovery level currently in effect for certificates
in the current vault. If it contains 'Purgeable', the certificate can be permanently deleted by
a privileged user; otherwise, only the system can purge the certificate, at the end of the
retention interval. Possible values include: "Purgeable", "Recoverable+Purgeable",
"Recoverable", "Recoverable+ProtectedSubscription", "CustomizedRecoverable+Purgeable",
"CustomizedRecoverable", "CustomizedRecoverable+ProtectedSubscription".
:vartype recovery_level: str or ~azure.keyvault.v7_3_preview.models.DeletionRecoveryLevel
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
'recoverable_days': {'readonly': True},
'recovery_level': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
'recoverable_days': {'key': 'recoverableDays', 'type': 'int'},
'recovery_level': {'key': 'recoveryLevel', 'type': 'str'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
not_before: Optional[datetime.datetime] = None,
expires: Optional[datetime.datetime] = None,
**kwargs
):
super(CertificateAttributes, self).__init__(enabled=enabled, not_before=not_before, expires=expires, **kwargs)
self.recoverable_days = None
self.recovery_level = None
class CertificateBundle(msrest.serialization.Model):
"""A certificate bundle consists of a certificate (X509) plus its attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The certificate id.
:vartype id: str
:ivar kid: The key id.
:vartype kid: str
:ivar sid: The secret id.
:vartype sid: str
:ivar x509_thumbprint: Thumbprint of the certificate.
:vartype x509_thumbprint: bytes
:ivar policy: The management policy.
:vartype policy: ~azure.keyvault.v7_3_preview.models.CertificatePolicy
:param cer: CER contents of x509 certificate.
:type cer: bytearray
    :param content_type: The content type of the secret, e.g. 'application/x-pem-file' or
     'application/x-pkcs12'.
:type content_type: str
:param attributes: The certificate attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'kid': {'readonly': True},
'sid': {'readonly': True},
'x509_thumbprint': {'readonly': True},
'policy': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'sid': {'key': 'sid', 'type': 'str'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
'policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'cer': {'key': 'cer', 'type': 'bytearray'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
cer: Optional[bytearray] = None,
content_type: Optional[str] = None,
attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(CertificateBundle, self).__init__(**kwargs)
self.id = None
self.kid = None
self.sid = None
self.x509_thumbprint = None
self.policy = None
self.cer = cer
self.content_type = content_type
self.attributes = attributes
self.tags = tags
class CertificateCreateParameters(msrest.serialization.Model):
"""The certificate create parameters.
:param certificate_policy: The management policy for the certificate.
:type certificate_policy: ~azure.keyvault.v7_3_preview.models.CertificatePolicy
:param certificate_attributes: The attributes of the certificate (optional).
:type certificate_attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_attribute_map = {
'certificate_policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
certificate_policy: Optional["CertificatePolicy"] = None,
certificate_attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(CertificateCreateParameters, self).__init__(**kwargs)
self.certificate_policy = certificate_policy
self.certificate_attributes = certificate_attributes
self.tags = tags
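# Illustrative sketch (not part of the generated code; values are
# placeholders): building the request body for a certificate create call
# from the models above.
#
#   params = CertificateCreateParameters(
#       certificate_attributes=CertificateAttributes(enabled=True),
#       tags={'team': 'security'},
#   )
#   params.serialize()  # -> {'attributes': {'enabled': True}, 'tags': {...}}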
class CertificateImportParameters(msrest.serialization.Model):
"""The certificate import parameters.
All required parameters must be populated in order to send to Azure.
:param base64_encoded_certificate: Required. Base64 encoded representation of the certificate
object to import. This certificate needs to contain the private key.
:type base64_encoded_certificate: str
:param password: If the private key in base64EncodedCertificate is encrypted, the password used
for encryption.
:type password: str
:param certificate_policy: The management policy for the certificate.
:type certificate_policy: ~azure.keyvault.v7_3_preview.models.CertificatePolicy
:param certificate_attributes: The attributes of the certificate (optional).
:type certificate_attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_validation = {
'base64_encoded_certificate': {'required': True},
}
_attribute_map = {
'base64_encoded_certificate': {'key': 'value', 'type': 'str'},
'password': {'key': 'pwd', 'type': 'str'},
'certificate_policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
base64_encoded_certificate: str,
password: Optional[str] = None,
certificate_policy: Optional["CertificatePolicy"] = None,
certificate_attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(CertificateImportParameters, self).__init__(**kwargs)
self.base64_encoded_certificate = base64_encoded_certificate
self.password = password
self.certificate_policy = certificate_policy
self.certificate_attributes = certificate_attributes
self.tags = tags
class CertificateIssuerItem(msrest.serialization.Model):
"""The certificate issuer item containing certificate issuer metadata.
:param id: Certificate Identifier.
:type id: str
:param provider: The issuer provider.
:type provider: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'provider': {'key': 'provider', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
provider: Optional[str] = None,
**kwargs
):
super(CertificateIssuerItem, self).__init__(**kwargs)
self.id = id
self.provider = provider
class CertificateIssuerListResult(msrest.serialization.Model):
"""The certificate issuer list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of certificate issuers in the key vault along
with a link to the next page of certificate issuers.
:vartype value: list[~azure.keyvault.v7_3_preview.models.CertificateIssuerItem]
:ivar next_link: The URL to get the next set of certificate issuers.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateIssuerItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateIssuerListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class CertificateIssuerSetParameters(msrest.serialization.Model):
"""The certificate issuer set parameters.
All required parameters must be populated in order to send to Azure.
:param provider: Required. The issuer provider.
:type provider: str
:param credentials: The credentials to be used for the issuer.
:type credentials: ~azure.keyvault.v7_3_preview.models.IssuerCredentials
:param organization_details: Details of the organization as provided to the issuer.
:type organization_details: ~azure.keyvault.v7_3_preview.models.OrganizationDetails
:param attributes: Attributes of the issuer object.
:type attributes: ~azure.keyvault.v7_3_preview.models.IssuerAttributes
"""
_validation = {
'provider': {'required': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'IssuerCredentials'},
'organization_details': {'key': 'org_details', 'type': 'OrganizationDetails'},
'attributes': {'key': 'attributes', 'type': 'IssuerAttributes'},
}
def __init__(
self,
*,
provider: str,
credentials: Optional["IssuerCredentials"] = None,
organization_details: Optional["OrganizationDetails"] = None,
attributes: Optional["IssuerAttributes"] = None,
**kwargs
):
super(CertificateIssuerSetParameters, self).__init__(**kwargs)
self.provider = provider
self.credentials = credentials
self.organization_details = organization_details
self.attributes = attributes
class CertificateIssuerUpdateParameters(msrest.serialization.Model):
"""The certificate issuer update parameters.
:param provider: The issuer provider.
:type provider: str
:param credentials: The credentials to be used for the issuer.
:type credentials: ~azure.keyvault.v7_3_preview.models.IssuerCredentials
:param organization_details: Details of the organization as provided to the issuer.
:type organization_details: ~azure.keyvault.v7_3_preview.models.OrganizationDetails
:param attributes: Attributes of the issuer object.
:type attributes: ~azure.keyvault.v7_3_preview.models.IssuerAttributes
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'IssuerCredentials'},
'organization_details': {'key': 'org_details', 'type': 'OrganizationDetails'},
'attributes': {'key': 'attributes', 'type': 'IssuerAttributes'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
credentials: Optional["IssuerCredentials"] = None,
organization_details: Optional["OrganizationDetails"] = None,
attributes: Optional["IssuerAttributes"] = None,
**kwargs
):
super(CertificateIssuerUpdateParameters, self).__init__(**kwargs)
self.provider = provider
self.credentials = credentials
self.organization_details = organization_details
self.attributes = attributes
class CertificateItem(msrest.serialization.Model):
"""The certificate item containing certificate metadata.
:param id: Certificate identifier.
:type id: str
:param attributes: The certificate management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param x509_thumbprint: Thumbprint of the certificate.
:type x509_thumbprint: bytes
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
x509_thumbprint: Optional[bytes] = None,
**kwargs
):
super(CertificateItem, self).__init__(**kwargs)
self.id = id
self.attributes = attributes
self.tags = tags
self.x509_thumbprint = x509_thumbprint
class CertificateListResult(msrest.serialization.Model):
"""The certificate list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of certificates in the key vault along with a
link to the next page of certificates.
:vartype value: list[~azure.keyvault.v7_3_preview.models.CertificateItem]
:ivar next_link: The URL to get the next set of certificates.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class CertificateMergeParameters(msrest.serialization.Model):
"""The certificate merge parameters.
All required parameters must be populated in order to send to Azure.
:param x509_certificates: Required. The certificate or the certificate chain to merge.
:type x509_certificates: list[bytearray]
:param certificate_attributes: The attributes of the certificate (optional).
:type certificate_attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_validation = {
'x509_certificates': {'required': True},
}
_attribute_map = {
'x509_certificates': {'key': 'x5c', 'type': '[bytearray]'},
'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
x509_certificates: List[bytearray],
certificate_attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(CertificateMergeParameters, self).__init__(**kwargs)
self.x509_certificates = x509_certificates
self.certificate_attributes = certificate_attributes
self.tags = tags
class CertificateOperation(msrest.serialization.Model):
"""A certificate operation is returned in case of asynchronous requests.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The certificate id.
:vartype id: str
:param issuer_parameters: Parameters for the issuer of the X509 component of a certificate.
:type issuer_parameters: ~azure.keyvault.v7_3_preview.models.IssuerParameters
:param csr: The certificate signing request (CSR) that is being used in the certificate
operation.
:type csr: bytearray
:param cancellation_requested: Indicates if cancellation was requested on the certificate
operation.
:type cancellation_requested: bool
:param status: Status of the certificate operation.
:type status: str
:param status_details: The status details of the certificate operation.
:type status_details: str
:param error: Error encountered, if any, during the certificate operation.
:type error: ~azure.keyvault.v7_3_preview.models.Error
:param target: Location which contains the result of the certificate operation.
:type target: str
:param request_id: Identifier for the certificate operation.
:type request_id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'issuer_parameters': {'key': 'issuer', 'type': 'IssuerParameters'},
'csr': {'key': 'csr', 'type': 'bytearray'},
'cancellation_requested': {'key': 'cancellation_requested', 'type': 'bool'},
'status': {'key': 'status', 'type': 'str'},
'status_details': {'key': 'status_details', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'target': {'key': 'target', 'type': 'str'},
'request_id': {'key': 'request_id', 'type': 'str'},
}
def __init__(
self,
*,
issuer_parameters: Optional["IssuerParameters"] = None,
csr: Optional[bytearray] = None,
cancellation_requested: Optional[bool] = None,
status: Optional[str] = None,
status_details: Optional[str] = None,
error: Optional["Error"] = None,
target: Optional[str] = None,
request_id: Optional[str] = None,
**kwargs
):
super(CertificateOperation, self).__init__(**kwargs)
self.id = None
self.issuer_parameters = issuer_parameters
self.csr = csr
self.cancellation_requested = cancellation_requested
self.status = status
self.status_details = status_details
self.error = error
self.target = target
self.request_id = request_id
class CertificateOperationUpdateParameter(msrest.serialization.Model):
"""The certificate operation update parameters.
All required parameters must be populated in order to send to Azure.
:param cancellation_requested: Required. Indicates if cancellation was requested on the
certificate operation.
:type cancellation_requested: bool
"""
_validation = {
'cancellation_requested': {'required': True},
}
_attribute_map = {
'cancellation_requested': {'key': 'cancellation_requested', 'type': 'bool'},
}
def __init__(
self,
*,
cancellation_requested: bool,
**kwargs
):
super(CertificateOperationUpdateParameter, self).__init__(**kwargs)
self.cancellation_requested = cancellation_requested
class CertificatePolicy(msrest.serialization.Model):
"""Management policy for a certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The certificate id.
:vartype id: str
:param key_properties: Properties of the key backing a certificate.
:type key_properties: ~azure.keyvault.v7_3_preview.models.KeyProperties
:param secret_properties: Properties of the secret backing a certificate.
:type secret_properties: ~azure.keyvault.v7_3_preview.models.SecretProperties
:param x509_certificate_properties: Properties of the X509 component of a certificate.
:type x509_certificate_properties:
~azure.keyvault.v7_3_preview.models.X509CertificateProperties
:param lifetime_actions: Actions that will be performed by Key Vault over the lifetime of a
certificate.
:type lifetime_actions: list[~azure.keyvault.v7_3_preview.models.LifetimeAction]
:param issuer_parameters: Parameters for the issuer of the X509 component of a certificate.
:type issuer_parameters: ~azure.keyvault.v7_3_preview.models.IssuerParameters
:param attributes: The certificate attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'key_properties': {'key': 'key_props', 'type': 'KeyProperties'},
'secret_properties': {'key': 'secret_props', 'type': 'SecretProperties'},
'x509_certificate_properties': {'key': 'x509_props', 'type': 'X509CertificateProperties'},
'lifetime_actions': {'key': 'lifetime_actions', 'type': '[LifetimeAction]'},
'issuer_parameters': {'key': 'issuer', 'type': 'IssuerParameters'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
}
def __init__(
self,
*,
key_properties: Optional["KeyProperties"] = None,
secret_properties: Optional["SecretProperties"] = None,
x509_certificate_properties: Optional["X509CertificateProperties"] = None,
lifetime_actions: Optional[List["LifetimeAction"]] = None,
issuer_parameters: Optional["IssuerParameters"] = None,
attributes: Optional["CertificateAttributes"] = None,
**kwargs
):
super(CertificatePolicy, self).__init__(**kwargs)
self.id = None
self.key_properties = key_properties
self.secret_properties = secret_properties
self.x509_certificate_properties = x509_certificate_properties
self.lifetime_actions = lifetime_actions
self.issuer_parameters = issuer_parameters
self.attributes = attributes
class CertificateRestoreParameters(msrest.serialization.Model):
"""The certificate restore parameters.
All required parameters must be populated in order to send to Azure.
:param certificate_bundle_backup: Required. The backup blob associated with a certificate
bundle.
:type certificate_bundle_backup: bytes
"""
_validation = {
'certificate_bundle_backup': {'required': True},
}
_attribute_map = {
'certificate_bundle_backup': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
*,
certificate_bundle_backup: bytes,
**kwargs
):
super(CertificateRestoreParameters, self).__init__(**kwargs)
self.certificate_bundle_backup = certificate_bundle_backup
class CertificateUpdateParameters(msrest.serialization.Model):
"""The certificate update parameters.
:param certificate_policy: The management policy for the certificate.
:type certificate_policy: ~azure.keyvault.v7_3_preview.models.CertificatePolicy
:param certificate_attributes: The attributes of the certificate (optional).
:type certificate_attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_attribute_map = {
'certificate_policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
certificate_policy: Optional["CertificatePolicy"] = None,
certificate_attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(CertificateUpdateParameters, self).__init__(**kwargs)
self.certificate_policy = certificate_policy
self.certificate_attributes = certificate_attributes
self.tags = tags
class Contact(msrest.serialization.Model):
"""The contact information for the vault certificates.
:param email_address: Email address.
:type email_address: str
:param name: Name.
:type name: str
:param phone: Phone number.
:type phone: str
"""
_attribute_map = {
'email_address': {'key': 'email', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'phone': {'key': 'phone', 'type': 'str'},
}
def __init__(
self,
*,
email_address: Optional[str] = None,
name: Optional[str] = None,
phone: Optional[str] = None,
**kwargs
):
super(Contact, self).__init__(**kwargs)
self.email_address = email_address
self.name = name
self.phone = phone
class Contacts(msrest.serialization.Model):
"""The contacts for the vault certificates.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Identifier for the contacts collection.
:vartype id: str
:param contact_list: The contact list for the vault certificates.
:type contact_list: list[~azure.keyvault.v7_3_preview.models.Contact]
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'contact_list': {'key': 'contacts', 'type': '[Contact]'},
}
def __init__(
self,
*,
contact_list: Optional[List["Contact"]] = None,
**kwargs
):
super(Contacts, self).__init__(**kwargs)
self.id = None
self.contact_list = contact_list
class DeletedCertificateBundle(CertificateBundle):
"""A Deleted Certificate consisting of its previous id, attributes and its tags, as well as information on when it will be purged.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The certificate id.
:vartype id: str
:ivar kid: The key id.
:vartype kid: str
:ivar sid: The secret id.
:vartype sid: str
:ivar x509_thumbprint: Thumbprint of the certificate.
:vartype x509_thumbprint: bytes
:ivar policy: The management policy.
:vartype policy: ~azure.keyvault.v7_3_preview.models.CertificatePolicy
:param cer: CER contents of x509 certificate.
:type cer: bytearray
:param content_type: The content type of the secret, e.g. 'application/x-pem-file' or
'application/x-pkcs12'.
:type content_type: str
:param attributes: The certificate attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
certificate.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the certificate is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the certificate was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'kid': {'readonly': True},
'sid': {'readonly': True},
'x509_thumbprint': {'readonly': True},
'policy': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'sid': {'key': 'sid', 'type': 'str'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
'policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'cer': {'key': 'cer', 'type': 'bytearray'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
cer: Optional[bytearray] = None,
content_type: Optional[str] = None,
attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedCertificateBundle, self).__init__(cer=cer, content_type=content_type, attributes=attributes, tags=tags, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class DeletedCertificateItem(CertificateItem):
"""The deleted certificate item containing metadata about the deleted certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Certificate identifier.
:type id: str
:param attributes: The certificate management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.CertificateAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param x509_thumbprint: Thumbprint of the certificate.
:type x509_thumbprint: bytes
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
certificate.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the certificate is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the certificate was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["CertificateAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
x509_thumbprint: Optional[bytes] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedCertificateItem, self).__init__(id=id, attributes=attributes, tags=tags, x509_thumbprint=x509_thumbprint, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class DeletedCertificateListResult(msrest.serialization.Model):
"""A list of certificates that have been deleted in this vault.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of deleted certificates in the vault along
with a link to the next page of deleted certificates.
:vartype value: list[~azure.keyvault.v7_3_preview.models.DeletedCertificateItem]
:ivar next_link: The URL to get the next set of deleted certificates.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeletedCertificateItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeletedCertificateListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Error(msrest.serialization.Model):
"""The key vault server error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar inner_error: The key vault server error.
:vartype inner_error: ~azure.keyvault.v7_3_preview.models.Error
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'inner_error': {'key': 'innererror', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = None
self.message = None
self.inner_error = None
class IssuerAttributes(msrest.serialization.Model):
"""The attributes of an issuer managed by the Key Vault service.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the issuer is enabled.
:type enabled: bool
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
**kwargs
):
super(IssuerAttributes, self).__init__(**kwargs)
self.enabled = enabled
self.created = None
self.updated = None
class IssuerBundle(msrest.serialization.Model):
"""The issuer for Key Vault certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Identifier for the issuer object.
:vartype id: str
:param provider: The issuer provider.
:type provider: str
:param credentials: The credentials to be used for the issuer.
:type credentials: ~azure.keyvault.v7_3_preview.models.IssuerCredentials
:param organization_details: Details of the organization as provided to the issuer.
:type organization_details: ~azure.keyvault.v7_3_preview.models.OrganizationDetails
:param attributes: Attributes of the issuer object.
:type attributes: ~azure.keyvault.v7_3_preview.models.IssuerAttributes
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'provider': {'key': 'provider', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'IssuerCredentials'},
'organization_details': {'key': 'org_details', 'type': 'OrganizationDetails'},
'attributes': {'key': 'attributes', 'type': 'IssuerAttributes'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
credentials: Optional["IssuerCredentials"] = None,
organization_details: Optional["OrganizationDetails"] = None,
attributes: Optional["IssuerAttributes"] = None,
**kwargs
):
super(IssuerBundle, self).__init__(**kwargs)
self.id = None
self.provider = provider
self.credentials = credentials
self.organization_details = organization_details
self.attributes = attributes
class IssuerCredentials(msrest.serialization.Model):
"""The credentials to be used for the certificate issuer.
:param account_id: The user name/account name/account id.
:type account_id: str
:param password: The password/secret/account key.
:type password: str
"""
_attribute_map = {
'account_id': {'key': 'account_id', 'type': 'str'},
'password': {'key': 'pwd', 'type': 'str'},
}
def __init__(
self,
*,
account_id: Optional[str] = None,
password: Optional[str] = None,
**kwargs
):
super(IssuerCredentials, self).__init__(**kwargs)
self.account_id = account_id
self.password = password
class IssuerParameters(msrest.serialization.Model):
"""Parameters for the issuer of the X509 component of a certificate.
:param name: Name of the referenced issuer object or reserved names; for example, 'Self' or
'Unknown'.
:type name: str
:param certificate_type: Certificate type as supported by the provider (optional); for example
'OV-SSL', 'EV-SSL'.
:type certificate_type: str
:param certificate_transparency: Indicates if the certificates generated under this policy
should be published to certificate transparency logs.
:type certificate_transparency: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'certificate_type': {'key': 'cty', 'type': 'str'},
'certificate_transparency': {'key': 'cert_transparency', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
certificate_type: Optional[str] = None,
certificate_transparency: Optional[bool] = None,
**kwargs
):
super(IssuerParameters, self).__init__(**kwargs)
self.name = name
self.certificate_type = certificate_type
self.certificate_transparency = certificate_transparency
class KeyProperties(msrest.serialization.Model):
"""Properties of the key pair backing a certificate.
:param exportable: Indicates if the private key can be exported.
:type exportable: bool
:param key_type: The type of key pair to be used for the certificate. Possible values include:
"EC", "EC-HSM", "RSA", "RSA-HSM", "oct", "oct-HSM".
:type key_type: str or ~azure.keyvault.v7_3_preview.models.JsonWebKeyType
:param key_size: The key size in bits. For example: 2048, 3072, or 4096 for RSA.
:type key_size: int
:param reuse_key: Indicates if the same key pair will be used on certificate renewal.
:type reuse_key: bool
:param curve: Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values
include: "P-256", "P-384", "P-521", "P-256K".
:type curve: str or ~azure.keyvault.v7_3_preview.models.JsonWebKeyCurveName
"""
_attribute_map = {
'exportable': {'key': 'exportable', 'type': 'bool'},
'key_type': {'key': 'kty', 'type': 'str'},
'key_size': {'key': 'key_size', 'type': 'int'},
'reuse_key': {'key': 'reuse_key', 'type': 'bool'},
'curve': {'key': 'crv', 'type': 'str'},
}
def __init__(
self,
*,
exportable: Optional[bool] = None,
key_type: Optional[Union[str, "JsonWebKeyType"]] = None,
key_size: Optional[int] = None,
reuse_key: Optional[bool] = None,
curve: Optional[Union[str, "JsonWebKeyCurveName"]] = None,
**kwargs
):
super(KeyProperties, self).__init__(**kwargs)
self.exportable = exportable
self.key_type = key_type
self.key_size = key_size
self.reuse_key = reuse_key
self.curve = curve
class KeyVaultError(msrest.serialization.Model):
"""The key vault error exception.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: The key vault server error.
:vartype error: ~azure.keyvault.v7_3_preview.models.Error
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultError, self).__init__(**kwargs)
self.error = None
class LifetimeAction(msrest.serialization.Model):
"""Action and its trigger that will be performed by Key Vault over the lifetime of a certificate.
:param trigger: The condition that will execute the action.
:type trigger: ~azure.keyvault.v7_3_preview.models.Trigger
:param action: The action that will be executed.
:type action: ~azure.keyvault.v7_3_preview.models.Action
"""
_attribute_map = {
'trigger': {'key': 'trigger', 'type': 'Trigger'},
'action': {'key': 'action', 'type': 'Action'},
}
def __init__(
self,
*,
trigger: Optional["Trigger"] = None,
action: Optional["Action"] = None,
**kwargs
):
super(LifetimeAction, self).__init__(**kwargs)
self.trigger = trigger
self.action = action
class OrganizationDetails(msrest.serialization.Model):
"""Details of the organization of the certificate issuer.
:param id: Id of the organization.
:type id: str
:param admin_details: Details of the organization administrator.
:type admin_details: list[~azure.keyvault.v7_3_preview.models.AdministratorDetails]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'admin_details': {'key': 'admin_details', 'type': '[AdministratorDetails]'},
}
def __init__(
self,
*,
id: Optional[str] = None,
admin_details: Optional[List["AdministratorDetails"]] = None,
**kwargs
):
super(OrganizationDetails, self).__init__(**kwargs)
self.id = id
self.admin_details = admin_details
class PendingCertificateSigningRequestResult(msrest.serialization.Model):
"""The pending certificate signing request result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The pending certificate signing request as Base64 encoded string.
:vartype value: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PendingCertificateSigningRequestResult, self).__init__(**kwargs)
self.value = None
class SecretProperties(msrest.serialization.Model):
"""Properties of the key backing a certificate.
:param content_type: The media type (MIME type).
:type content_type: str
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
}
def __init__(
self,
*,
content_type: Optional[str] = None,
**kwargs
):
super(SecretProperties, self).__init__(**kwargs)
self.content_type = content_type
class SubjectAlternativeNames(msrest.serialization.Model):
"""The subject alternate names of a X509 object.
:param emails: Email addresses.
:type emails: list[str]
:param dns_names: Domain names.
:type dns_names: list[str]
:param upns: User principal names.
:type upns: list[str]
"""
_attribute_map = {
'emails': {'key': 'emails', 'type': '[str]'},
'dns_names': {'key': 'dns_names', 'type': '[str]'},
'upns': {'key': 'upns', 'type': '[str]'},
}
def __init__(
self,
*,
emails: Optional[List[str]] = None,
dns_names: Optional[List[str]] = None,
upns: Optional[List[str]] = None,
**kwargs
):
super(SubjectAlternativeNames, self).__init__(**kwargs)
self.emails = emails
self.dns_names = dns_names
self.upns = upns
class Trigger(msrest.serialization.Model):
"""A condition to be satisfied for an action to be executed.
:param lifetime_percentage: Percentage of lifetime at which to trigger. Value should be between
1 and 99.
:type lifetime_percentage: int
:param days_before_expiry: Days before expiry to attempt renewal. Value should be between 1 and
validity_in_months multiplied by 27. If validity_in_months is 36, then value should be between
1 and 972 (36 * 27).
:type days_before_expiry: int
"""
_validation = {
'lifetime_percentage': {'maximum': 99, 'minimum': 1},
}
_attribute_map = {
'lifetime_percentage': {'key': 'lifetime_percentage', 'type': 'int'},
'days_before_expiry': {'key': 'days_before_expiry', 'type': 'int'},
}
def __init__(
self,
*,
lifetime_percentage: Optional[int] = None,
days_before_expiry: Optional[int] = None,
**kwargs
):
super(Trigger, self).__init__(**kwargs)
self.lifetime_percentage = lifetime_percentage
self.days_before_expiry = days_before_expiry
class X509CertificateProperties(msrest.serialization.Model):
"""Properties of the X509 component of a certificate.
:param subject: The subject name. Should be a valid X509 distinguished name.
:type subject: str
:param ekus: The enhanced key usage.
:type ekus: list[str]
:param subject_alternative_names: The subject alternative names.
:type subject_alternative_names: ~azure.keyvault.v7_3_preview.models.SubjectAlternativeNames
:param key_usage: List of key usages.
:type key_usage: list[str or ~azure.keyvault.v7_3_preview.models.KeyUsageType]
:param validity_in_months: The duration that the certificate is valid in months.
:type validity_in_months: int
"""
_validation = {
'validity_in_months': {'minimum': 0},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'ekus': {'key': 'ekus', 'type': '[str]'},
'subject_alternative_names': {'key': 'sans', 'type': 'SubjectAlternativeNames'},
'key_usage': {'key': 'key_usage', 'type': '[str]'},
'validity_in_months': {'key': 'validity_months', 'type': 'int'},
}
def __init__(
self,
*,
subject: Optional[str] = None,
ekus: Optional[List[str]] = None,
subject_alternative_names: Optional["SubjectAlternativeNames"] = None,
key_usage: Optional[List[Union[str, "KeyUsageType"]]] = None,
validity_in_months: Optional[int] = None,
**kwargs
):
super(X509CertificateProperties, self).__init__(**kwargs)
self.subject = subject
self.ekus = ekus
self.subject_alternative_names = subject_alternative_names
self.key_usage = key_usage
self.validity_in_months = validity_in_months
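# Illustrative sketch (not part of the generated client): how the models above
# compose into a policy. All field values here are example assumptions.
example_policy = CertificatePolicy(
key_properties=KeyProperties(exportable=True, key_type="RSA", key_size=2048, reuse_key=False),
secret_properties=SecretProperties(content_type="application/x-pkcs12"),
x509_certificate_properties=X509CertificateProperties(
subject="CN=example.com",
subject_alternative_names=SubjectAlternativeNames(dns_names=["example.com"]),
validity_in_months=12,
),
issuer_parameters=IssuerParameters(name="Self"),
lifetime_actions=[LifetimeAction(trigger=Trigger(lifetime_percentage=80))],
)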
# coding: utf-8
import os
from lxml import etree as ET
import time
import shortuuid
import uuid
from PIL import Image
import shutil
import sys
from subprocess import Popen, PIPE
#from http://stackoverflow.com/questions/14996453/python-libraries-to-calculate-human-readable-filesize-from-bytes
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def humansize(nbytes):
if nbytes == 0: return '0 B'
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
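# Quick sanity checks for humansize (illustrative; values verified by hand):
assert humansize(0) == '0 B'
assert humansize(1536) == '1.5 KB'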
noDateCount = 0
#batchDir = "/media/bcadmin/SPE/Electronic_Records_Library/ua395/fromDVDs"
#batchDir = "/media/bcadmin/SPE/Electronic_Records_Library/ua395/fromDVDs2"
#batchDir = "/media/bcadmin/SPE/Electronic_Records_Library/ua395/fromDVDs3"
batchDir = "/media/bcadmin/SPE/Electronic_Records_Library/ua395/fromDVDs4"
descDir = "/home/bcadmin/Desktop/Processing/ua395"
accessionDir = "/media/bcadmin/SPE/Electronic_Records_Library/ua395/toSIP"
startTime = time.time()
startTimeReadable = str(time.strftime("%Y-%m-%d %H:%M:%S"))
print "Start Time: " + startTimeReadable
folderTotal = len(os.listdir(batchDir))
totalVerified = 0
diskStartTime = time.time()
totalTime = 0
parser = ET.XMLParser(remove_blank_text=True)
Order1Input = ET.parse(os.path.join(descDir, "OrderEntry.xml"), parser)
Order2Input = ET.parse(os.path.join(descDir, "OrderEntry2.xml"), parser)
OrderEntry = Order1Input.getroot()
OrderEntry2 = Order2Input.getroot()
#make SIP metadata file
collectionID = "ua395"
accessionNumber = collectionID + "-" + str(shortuuid.uuid())
sipRoot = ET.Element("accession")
sipRoot.set("version", "0.1")
sipRoot.set("number", accessionNumber)
submitTime = time.time()
submitTimeReadable = str(time.strftime("%Y-%m-%d %H:%M:%S"))
sipRoot.set("submitted", submitTimeReadable)
sipRoot.set("submittedPosix", str(submitTime))
#create profile
profileXML = ET.SubElement(sipRoot, "profile")
notesXML = ET.SubElement(profileXML, "notes")
notesXML.text = ""
creatorXML = ET.SubElement(profileXML, "creator")
creatorXML.text = "Digital Media Unit"
creatorIdXML = ET.SubElement(profileXML, "creatorId")
creatorIdXML.text = collectionID
donorXML = ET.SubElement(profileXML, "donor")
donorXML.text = "Mark Schmidt"
roleXML = ET.SubElement(profileXML, "role")
roleXML.text = "Campus Photographer"
emailXML = ET.SubElement(profileXML, "email")
emailXML.text = "pmiller2@albany.edu"
officeXML = ET.SubElement(profileXML, "office")
officeXML.text = "University Hall 202"
address1XML = ET.SubElement(profileXML, "address1")
address1XML.text = "1400 Washington Ave"
address2XML = ET.SubElement(profileXML, "address2")
address2XML.text = "Albany, NY 12222"
address3XML = ET.SubElement(profileXML, "address3")
address3XML.text = ""
methodXML = ET.SubElement(profileXML, "method")
methodXML.text = "Imaged from DVD and CD-R and extracted from raw files into JPGs with ImageMagick (https://github.com/UAlbanyArchives/ua395)"
locationXML = ET.SubElement(profileXML, "location")
locationXML.text = batchDir
extentXML = ET.SubElement(profileXML, "extent")
#create accession folder
accession = os.path.join(accessionDir, accessionNumber)
if not os.path.isdir(accession):
os.mkdir(accession)
dataDir = os.path.join(accession, "data")
if not os.path.isdir(dataDir):
os.mkdir(dataDir)
metadataString = ET.tostring(sipRoot, pretty_print=True, xml_declaration=True, encoding="utf-8")
metadataFile = open(os.path.join(accession, accessionNumber + ".xml"), "w")
metadataFile.write(metadataString)
metadataFile.close()
totalSize = 0
folderCount = 0
for folderName in os.listdir(batchDir):
folderCount = folderCount + 1
folderPath = os.path.join(batchDir, folderName)
print "reading " + folderName
print str(folderCount) + " of " + str(folderTotal)
for job in os.listdir(folderPath):
jobDir = os.path.join(folderPath, job)
#special fix for incorrect job number for John Lewis photos
if job == "20000935":
job = "20001244"
jobMove = os.path.join(dataDir, job)
if not os.path.isdir(jobMove):
os.mkdir(jobMove)
jobRecord = ET.Element("folder")
idXML = ET.SubElement(jobRecord, "id")
idXML.text = str(uuid.uuid4())
pathXML = ET.SubElement(jobRecord, "path")
pathXML.text = os.path.join(folderPath, job)
descriptionXML = ET.SubElement(jobRecord, "description")
accessXML = ET.SubElement(jobRecord, "access")
curatorialEventsXML = ET.SubElement(jobRecord, "curatorialEvents")
eventXML = ET.SubElement(curatorialEventsXML, "event")
eventXML.set("timestamp", str(time.time()))
eventXML.set("timestampHuman", str(time.strftime("%Y-%m-%d %H:%M:%S")))
eventXML.text = "Imaged from optical media with dd, carved from image with fiwalk and icat, converted all raw to jpg"
#special fix for John Lewis photos
if job == "20001244":
eventXML = ET.SubElement(curatorialEventsXML, "event")
eventXML.set("timestamp", str(time.time()))
eventXML.set("timestampHuman", str(time.strftime("%Y-%m-%d %H:%M:%S")))
eventXML.text = "Original job number incorrect, changed 20000935 to 20001244"
recordEventsXML = ET.SubElement(jobRecord, "recordEvents")
#if old job number
if job.isdigit() and job.startswith("200"):
# reset match for each job so a hit from a previous job can't leak through
match = None
for order in OrderEntry:
if order.tag == "OrderEntry":
if order.find("Job_x0020_Number").text == job:
match = order
if match is None:
for order in OrderEntry2:
if order.tag == "OrderEntry":
if order.find("Job_x0020_Number").text == job:
match = order
if match is None:
print "NO MATCH FOR: " + str(job)
sys.exit("NO MATCH FOR: " + str(job))
else:
description = match.find("Description").text.replace("\n", " ")
description = " ".join(description.split())
if "photo session:" in description:
description = description.replace("photo session:", "").strip()
elif "photo session" in description:
description = description.replace("photo session", "").strip()
if not match.find("Department") is None and match.find("Department").text:
departCheck = True
depart = match.find("Department").text
else:
departCheck = False
if "****" in description:
description = description.split("****")[0]
if match.find("DateDue") is None:
if match.find("Date") is None:
print "DATE ERROR______________________________________________"
else:
dbDate = match.find("Date").text
else:
dbDate = match.find("DateDue").text
jobRecord.set("name", job)
if departCheck == True:
descriptionXML.text = depart + ": " + description.strip()
else:
descriptionXML.text = description.strip()
timestamp = ET.Element("timestamp")
timestamp.text = dbDate.replace("T", " ")
timestamp.set("timeType", "iso8601")
timestamp.set("parser", "Database Entry")
recordEventsXML.append(timestamp)
eventXML = ET.SubElement(curatorialEventsXML, "event")
eventXML.set("timestamp", str(time.time()))
eventXML.set("timestampHuman", str(time.strftime("%Y-%m-%d %H:%M:%S")))
eventXML.text = "description record extracted from photographer's Microsoft Access database by Job number"
else:
try:
jobDesc = os.listdir(jobDir)[0]
except (OSError, IndexError):
print "no subpath for " + job
jobDesc = job
jobRecord.set("name", jobDesc)
descriptionXML.text = jobDesc
eventXML = ET.SubElement(curatorialEventsXML, "event")
eventXML.set("timestamp", str(time.time()))
eventXML.set("timestampHuman", str(time.strftime("%Y-%m-%d %H:%M:%S")))
eventXML.text = "description record used original folder name"
# sample the second jpg in each directory for a representative EXIF date
for rootDir, directs, images in os.walk(jobDir):
imageCount = 0
for image in images:
if image.lower().endswith(".jpg"):
imageCount = imageCount + 1
if imageCount == 2:
imageFile = os.path.join(rootDir, image)
try:
exifDate = Image.open(imageFile)._getexif()[36867]  # EXIF DateTimeOriginal
timestamp = ET.Element("timestamp")
timestamp.text = exifDate
timestamp.set("timeType", "iso8601")
timestamp.set("parser", "EXIF")
recordEventsXML.append(timestamp)
except:
try:
exifDate = Image.open(imageFile)._getexif()[306]  # EXIF DateTime
timestamp = ET.Element("timestamp")
timestamp.text = exifDate
timestamp.set("timeType", "iso8601")
timestamp.set("parser", "EXIF")
recordEventsXML.append(timestamp)
except:
continue
for root, dirs, files in os.walk(jobDir):
for file in files:
fileCheck = file.lower()
if fileCheck == "thumbs.db" or fileCheck == "desktop.ini" or fileCheck == ".ds_store":
pass
else:
filePath = os.path.join(root, file).decode(sys.getdefaultencoding())
fileSize = os.path.getsize(filePath)
if fileSize > 0:
totalSize = totalSize + fileSize
imageRecord = ET.Element("file")
imageRecord.set("name", file)
idXML = ET.SubElement(imageRecord, "id")
idXML.text = str(uuid.uuid4())
pathXML = ET.SubElement(imageRecord, "path")
pathXML.text = filePath
descriptionXML = ET.SubElement(imageRecord, "description")
accessXML = ET.SubElement(imageRecord, "access")
curatorialEventsXML = ET.SubElement(imageRecord, "curatorialEvents")
eventXML = ET.SubElement(curatorialEventsXML, "event")
eventXML.set("timestamp", str(time.time()))
eventXML.set("timestampHuman", str(time.strftime("%Y-%m-%d %H:%M:%S")))
eventXML.text = "Imaged from optical media with dd, carved from image with fiwalk and icat, converted all raw to jpg"
recordEventsXML = ET.SubElement(imageRecord, "recordEvents")
try:
exifImage = Image.open(filePath)
exifDate = exifImage._getexif()[36867]
timestamp = ET.Element("timestamp")
timestamp.text = exifDate
timestamp.set("timeType", "iso8601")
timestamp.set("parser", "EXIF.DateTimeOriginal")
recordEventsXML.append(timestamp)
except:
try:
exifImage = Image.open(filePath)
exifDate = exifImage._getexif()[306]
timestamp = ET.Element("timestamp")
timestamp.text = exifDate
timestamp.set("timeType", "iso8601")
timestamp.set("parser", "EXIF.DateTime")
recordEventsXML.append(timestamp)
except:
noDateCount = noDateCount + 1
pass
jobRecord.append(imageRecord)
# default destination is the job directory; only duplicates get a new name
destPath = jobMove
if os.path.isfile(os.path.join(jobMove, file)):
def dupfileConflict(file, dupNumber, jobMove):
# find the lowest unused duplicate suffix, producing names like photo[2].jpg
fileRoot, fileExt = os.path.splitext(file)
dupFile = fileRoot + "[" + str(dupNumber) + "]" + fileExt
if os.path.isfile(os.path.join(jobMove, dupFile)):
print "<----------------- More than 2"
return dupfileConflict(file, dupNumber + 1, jobMove)
else:
return dupNumber
dupNumber = dupfileConflict(file, 2, jobMove)
fileRoot, fileExt = os.path.splitext(file)
dupFile = fileRoot + "[" + str(dupNumber) + "]" + fileExt
print dupFile
# copy to the renamed path but keep jobMove pointing at the directory,
# otherwise every later file in this job would be copied onto this one
destPath = os.path.join(jobMove, dupFile)
eventXML = ET.SubElement(curatorialEventsXML, "event")
eventXML.set("timestamp", str(time.time()))
eventXML.set("timestampHuman", str(time.strftime("%Y-%m-%d %H:%M:%S")))
eventXML.text = "File with same name already present, original files may have been in different directories. Renamed " + file + " to " + dupFile + "."
if os.name == "nt":
try:
shutil.copy2(filePath, destPath)
except:
sys.exit("shutil.copy2() ERROR: shutil.copy2(\"" + filePath + "\", \"" + destPath + "\")")
else:
try:
moveCmd = "cp -p \"" + filePath + "\" \"" + destPath + "\""
moveFile = Popen(moveCmd, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = moveFile.communicate()
if len(stderr) > 0:
print stderr
sys.exit("Unix cp -p error: " + stderr)
if len(stdout) > 0:
sys.exit("Unix cp -p output: " + stdout)
except:
sys.exit("cp COMMAND ERROR: " + moveCmd)
else:
print "NO FILE SIZE FOR " + filePath
eventXML = ET.SubElement(curatorialEventsXML, "event")
eventXML.set("timestamp", str(time.time()))
eventXML.set("timestampHuman", str(time.strftime("%Y-%m-%d %H:%M:%S")))
eventXML.text = filePath + " may have been corrupted. icat resulted in file with no size, so file was ignored."
sipRoot.append(jobRecord)
metadataString = ET.tostring(sipRoot, pretty_print=True, xml_declaration=True, encoding="utf-8")
metadataFile = open(os.path.join(accession, accessionNumber + ".xml"), "w")
metadataFile.write(metadataString)
metadataFile.close()
diskProcessTime = time.time() - diskStartTime
totalTime = totalTime + diskProcessTime
print "Process took " + str(diskProcessTime) + " seconds or " + str(diskProcessTime/60) + " minutes or " + str(diskProcessTime/3600) + " hours"
avgTime = totalTime/folderCount
print "Average is " + str(avgTime)
remaining = folderTotal - folderCount
print str(remaining) + " Remaining"
estimateTime = avgTime*remaining
print "Estimated time left: " + str(estimateTime) + " seconds or " + str(estimateTime/60) + " minutes or " + str(estimateTime/3600) + " hours"
diskStartTime = time.time()
readableSize = humansize(totalSize)
sipRoot.find("profile/extent").set("unit", "bytes")
sipRoot.find("profile/extent").text = str(totalSize)
sipRoot.find("profile/extent").set("humanReadable", str(readableSize))
metadataString = ET.tostring(sipRoot, pretty_print=True, xml_declaration=True, encoding="utf-8")
metadataFile = open(os.path.join(accession, accessionNumber + ".xml"), "w")
metadataFile.write(metadataString)
metadataFile.close()
print str(noDateCount) + " unable to get dates"
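# For reference, a sketch of the accession XML this script writes. The shape
# follows the element-building code above; the values are illustrative only:
#
# <accession version="0.1" number="ua395-<shortuuid>" submitted="..." submittedPosix="...">
#   <profile>
#     <creator>Digital Media Unit</creator>
#     ...
#     <extent unit="bytes" humanReadable="1.5 GB">1610612736</extent>
#   </profile>
#   <folder name="20001244">
#     <id>...</id>
#     <path>...</path>
#     <description>...</description>
#     <access/>
#     <curatorialEvents><event timestamp="...">...</event></curatorialEvents>
#     <recordEvents><timestamp timeType="iso8601" parser="EXIF.DateTimeOriginal">...</timestamp></recordEvents>
#     <file name="...">...</file>
#   </folder>
# </accession>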
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.conf import settings
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo_utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
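# Illustrative example (not in the original module): calling
# create_upload_form_attributes('template', 'file', 'Template File') returns
#   {'class': 'switched',
#    'data-switch-on': 'templatesource',
#    'data-templatesource-file': 'Template File'}
# which the dashboard's switchable-form JavaScript reads to show the field
# only while 'file' is the selected template source.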
class TemplateForm(forms.SelfHandlingForm):
class Meta(object):
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment', _('environment'), cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
files, tpl =\
api.heat.get_template_files(cleaned.get('template_data'),
cleaned.get('template_url'))
kwargs['files'] = files
kwargs['template'] = tpl
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
cleaned['template_validate']['files'] = files
cleaned['template_validate']['template'] = tpl
except Exception as e:
raise forms.ValidationError(six.text_type(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
:type cleaned: dict
:param cleaned: existing cleaned fields from form
:type files: dict
:param files: files uploaded with the form
:rtype: None
:return: updates the passed-in ``cleaned`` dict in place
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta(object):
name = _('Edit Template')
help_text = _('Select a new template to re-launch a stack.')
stack_id = forms.CharField(label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly':
'readonly'}))
class PreviewTemplateForm(TemplateForm):
class Meta(object):
name = _('Preview Template')
help_text = _('Select a new template to preview a stack.')
class CreateStackForm(forms.SelfHandlingForm):
param_prefix = '__param_'
class Meta(object):
name = _('Create Stack')
environment_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
parameters = forms.CharField(
widget=forms.widgets.HiddenInput)
stack_name = forms.RegexField(
max_length=255,
label=_('Stack Name'),
help_text=_('Name of the stack to create.'),
regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
error_messages={'invalid':
_('Name must start with a letter and may '
'only contain letters, numbers, underscores, '
'periods and hyphens.')})
timeout_mins = forms.IntegerField(
initial=60,
label=_('Creation Timeout (minutes)'),
help_text=_('Stack creation timeout in minutes.'))
enable_rollback = forms.BooleanField(
label=_('Rollback On Failure'),
help_text=_('Enable rollback on create/update failure.'),
required=False)
def __init__(self, *args, **kwargs):
parameters = kwargs.pop('parameters')
# special case: load template data from API, not passed in params
if kwargs.get('validate_me'):
parameters = kwargs.pop('validate_me')
super(CreateStackForm, self).__init__(*args, **kwargs)
if self._stack_password_enabled():
self.fields['password'] = forms.CharField(
label=_('Password for user "%s"') % self.request.user.username,
help_text=_('This is required for operations to be performed '
'throughout the lifecycle of the stack'),
widget=forms.PasswordInput())
self._build_parameter_fields(parameters)
def _stack_password_enabled(self):
stack_settings = getattr(settings, 'OPENSTACK_HEAT_STACK', {})
return stack_settings.get('enable_user_pass', True)
def _build_parameter_fields(self, template_validate):
self.help_text = template_validate['Description']
params = template_validate.get('Parameters', {})
if template_validate.get('ParameterGroups'):
params_in_order = []
for group in template_validate['ParameterGroups']:
for param in group.get('parameters', []):
if param in params:
params_in_order.append((param, params[param]))
else:
# no parameter groups, simply sorted to make the order fixed
params_in_order = sorted(params.items())
for param_key, param in params_in_order:
field = None
field_key = self.param_prefix + param_key
field_args = {
'initial': param.get('Default', None),
'label': param.get('Label', param_key),
'help_text': html.escape(param.get('Description', '')),
'required': param.get('Default', None) is None
}
param_type = param.get('Type', None)
hidden = strutils.bool_from_string(param.get('NoEcho', 'false'))
if 'CustomConstraint' in param:
choices = self._populate_custom_choices(
param['CustomConstraint'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif 'AllowedValues' in param:
choices = map(lambda x: (x, x), param['AllowedValues'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif param_type == 'Json' and 'Default' in param:
field_args['initial'] = json.dumps(param['Default'])
field = forms.CharField(**field_args)
elif param_type in ('CommaDelimitedList', 'String', 'Json'):
if 'MinLength' in param:
field_args['min_length'] = int(param['MinLength'])
field_args['required'] = field_args['min_length'] > 0
if 'MaxLength' in param:
field_args['max_length'] = int(param['MaxLength'])
if hidden:
field_args['widget'] = forms.PasswordInput()
field = forms.CharField(**field_args)
elif param_type == 'Number':
if 'MinValue' in param:
field_args['min_value'] = int(param['MinValue'])
if 'MaxValue' in param:
field_args['max_value'] = int(param['MaxValue'])
field = forms.IntegerField(**field_args)
# heat-api currently returns the boolean type in lowercase
# (see https://bugs.launchpad.net/heat/+bug/1361448)
# so for better compatibility both are checked here
elif param_type in ('Boolean', 'boolean'):
field = forms.BooleanField(**field_args)
if field:
self.fields[field_key] = field
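# As an illustration (parameter values assumed, not from the original module),
# a validated template parameter such as
#   {'Type': 'Number', 'MinValue': '1', 'MaxValue': '10',
#    'Description': 'Instance count', 'Default': 2}
# becomes forms.IntegerField(initial=2, min_value=1, max_value=10,
# required=False), stored under the key '__param_<name>' in self.fields.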
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('password'):
fields['password'] = data.get('password')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_create(self.request, **fields)
messages.success(request, _("Stack creation started."))
return True
except Exception:
exceptions.handle(request)
def _populate_custom_choices(self, custom_type):
if custom_type == 'neutron.network':
return instance_utils.network_field_data(self.request, True)
if custom_type == 'nova.keypair':
return instance_utils.keypair_field_data(self.request, True)
if custom_type == 'glance.image':
return image_utils.image_field_data(self.request, True)
if custom_type == 'nova.flavor':
return instance_utils.flavor_field_data(self.request, True)
return []
class EditStackForm(CreateStackForm):
class Meta(object):
name = _('Update Stack Parameters')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
stack_id = data.get('stack_id')
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('password'):
fields['password'] = data.get('password')
try:
api.heat.stack_update(self.request, stack_id=stack_id, **fields)
messages.success(request, _("Stack update started."))
return True
except Exception:
exceptions.handle(request)
class PreviewStackForm(CreateStackForm):
class Meta(object):
name = _('Preview Stack Parameters')
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(CreateStackForm, self).__init__(*args, **kwargs)
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
stack_preview = api.heat.stack_preview(self.request, **fields)
request.method = 'GET'
return self.next_view.as_view()(request,
stack_preview=stack_preview)
except Exception:
exceptions.handle(request)