prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
#!/usr/bin/env python3
import argparse
from sys import argv
from vang.bitbucket.get_branches import get_branches
from vang. | bitbucket.utils import get_repo_specs
def has_branch(repo_specs, branch):
    """Yield ``(spec, bool)`` for each repo spec, the bool telling whether
    the repo has a branch whose displayId equals *branch*.
    """
    for repo_spec in repo_specs:
        # Query Bitbucket one spec at a time and collect the display ids.
        display_ids = [
            b['displayId']
            for s, bs in get_branches((repo_spec, ), branch)
            for b in bs
        ]
        yield repo_spec, branch in display_ids
def main(branch,
         only_has=True,
         only_not_has=False,
         dirs=None,
         repos=None,
         projects=None):
    """Print repos according to whether they have *branch*.

    With ``only_has`` print matching repos, with ``only_not_has`` print
    non-matching ones, otherwise print every repo with its status.
    """
    specs = get_repo_specs(dirs, repos, projects)
    for spec, found in has_branch(specs, branch):
        label = f'{spec[0]}/{spec[1]}'
        if only_has:
            if found:
                print(label)
        elif only_not_has:
            if not found:
                print(label)
        else:
            print(f'{label}, {branch}: {found}')
def parse_args(args):
    """Parse command-line arguments.

    :param args: list of command-line tokens, e.g. ``sys.argv[1:]``
    :return: the parsed :class:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description='Check repository branches in Bitbucket')
    parser.add_argument('branch', help='The branch to check')
    # -o and -n are mutually exclusive output filters.
    filter_group = parser.add_mutually_exclusive_group()
    filter_group.add_argument(
        '-o',
        '--only_has',
        action='store_true',
        help='Print only repos that have the branch.')
    filter_group.add_argument(
        '-n',
        '--only_not_has',
        action='store_true',
        help='Print only repos that do not have the branch.')
    # Exactly one source of repo specs: directories, repos or projects.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '-d',
        '--dirs',
        nargs='*',
        default=['.'],
        help='Git directories to extract repo information from')
    group.add_argument(
        '-r', '--repos', nargs='*', help='Repos, e.g. key1/repo1 key2/repo2')
    group.add_argument(
        '-p', '--projects', nargs='*', help='Projects, e.g. key1 key2')
    return parser.parse_args(args)
if __name__ == '__main__': # pragma: no cover
    # CLI entry point: expand the parsed argparse Namespace into main()'s
    # keyword arguments.
    main(**parse_args(argv[1:]).__dict__)
|
# -*- coding: utf-8 -*-
# daemon/pidfile.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import (absolute_import, unicode_literals)
from lockfile.pidlockfile import PIDLockFile
class TimeoutPIDLockFile(PIDLockFile, object):
    """ Lockfile with default timeout, implemented as a Unix PID file.

        This uses the ``PIDLockFile`` implementation, with the
        following changes:

        * The `acquire_timeout` parameter to the initialiser will be
          used as the default `timeout` parameter for the `acquire`
          method.

        """

    def __init__(self, path, acquire_timeout=None, *args, **kwargs):
        """ Set up the parameters of a TimeoutPIDLockFile.

            :param path: Filesystem path to the PID file.
            :param acquire_timeout: Value to use by default for the
                `acquire` call.
            :return: ``None``.

            """
        self.acquire_timeout = acquire_timeout
        super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)

    def acquire(self, timeout=None, *args, **kwargs):
        """ Acquire the lock.

            :param timeout: Specifies the timeout; see below for valid
                values.
            :return: ``None``.

            The `timeout` defaults to the value set during
            initialisation with the `acquire_timeout` parameter. It is
            passed to `PIDLockFile.acquire`; see that method for
            details.

            """
        if timeout is None:
            timeout = self.acquire_timeout
        super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from django.conf import settings
from django.core.signals import request_started
from django.db import reset_queries
from django.template import context
from django.utils.translation import get_language, activate
from shutil import rmtree as _rmtree
from tempfile import template, mkdtemp
import sys
from cms.utils.compat.string_io import StringIO
class NULL:
    """Sentinel marking settings/attributes that did not exist before an
    override (so they can be deleted again, not set to ``None``)."""
class SettingsOverride(object):
    """
    Context manager that patches Django settings and restores their
    initial values on exit.

    Example:

        with SettingsOverride(DEBUG=True):
            # do something
    """

    def __init__(self, **overrides):
        self.overrides = overrides
        # Settings that need extra cache invalidation after a change.
        self.special_handlers = {
            'TEMPLATE_CONTEXT_PROCESSORS': self.template_context_processors,
        }

    def __enter__(self):
        self.old = {}
        for name, new_value in self.overrides.items():
            self.old[name] = getattr(settings, name, NULL)
            setattr(settings, name, new_value)

    def __exit__(self, type, value, traceback):
        for name, previous in self.old.items():
            if previous is NULL:
                delattr(settings, name)  # do not pollute the context!
            else:
                setattr(settings, name, previous)
            self.special_handlers.get(name, lambda: None)()

    def template_context_processors(self):
        # Force Django to rebuild its cached context-processor list.
        context._standard_context_processors = None
class StdOverride(object):
    """Redirect ``sys.stdout`` or ``sys.stderr`` to a buffer for the
    duration of the context; the original stream is restored on exit."""

    def __init__(self, std='out', buffer=None):
        self.std = std
        self.buffer = buffer or StringIO()

    def __enter__(self):
        setattr(sys, 'std%s' % self.std, self.buffer)
        return self.buffer

    def __exit__(self, type, value, traceback):
        # Restore from the interpreter's pristine __stdout__/__stderr__.
        original = getattr(sys, '__std%s__' % self.std)
        setattr(sys, 'std%s' % self.std, original)
class StdoutOverride(StdOverride):
    """
    Capture standard output in a StringIO so a test can inspect what the
    program printed.

    example:
        lines = None
        with StdoutOverride() as buffer:
            # print stuff
            lines = buffer.getvalue()
    """

    def __init__(self, buffer=None):
        # Fixed to the 'out' stream; everything else comes from StdOverride.
        super(StdoutOverride, self).__init__('out', buffer)
class LanguageOverride(object):
    """Activate the given language for the duration of the context and
    re-activate the previously active language on exit."""

    def __init__(self, language):
        self.newlang = language

    def __enter__(self):
        # Remember what was active so __exit__ can restore it.
        self.oldlang = get_language()
        activate(self.newlang)

    def __exit__(self, type, value, traceback):
        activate(self.oldlang)
class TemporaryDirectory:
    """Context-manager wrapper around ``mkdtemp``.

    Behaves like mkdtemp but usable as a context manager:

        with TemporaryDirectory() as tmpdir:
            ...

    On leaving the context the directory and everything inside it is
    removed.
    """

    def __init__(self, suffix="", prefix=template, dir=None):
        self.name = mkdtemp(suffix, prefix, dir)

    def __enter__(self):
        return self.name

    def cleanup(self):
        try:
            from tempfile import _exists
        except ImportError:
            # Interpreter teardown: tempfile internals may already be gone.
            return
        if _exists(self.name):
            _rmtree(self.name)

    def __exit__(self, exc, value, tb):
        self.cleanup()
class UserLoginContext(object):
    """Log the test case's client in as *user* (password == username)
    for the duration of the context, then log out and restore any
    previous ``testcase.user``."""

    def __init__(self, testcase, user):
        self.testcase = testcase
        self.user = user

    def __enter__(self):
        ok = self.testcase.client.login(username=self.user.username,
                                        password=self.user.username)
        self.old_user = getattr(self.testcase, 'user', None)
        self.testcase.user = self.user
        self.testcase.assertTrue(ok)

    def __exit__(self, exc, value, tb):
        self.testcase.user = self.old_user
        if not self.testcase.user:
            # There was no user before; remove the attribute entirely.
            delattr(self.testcase, 'user')
        self.testcase.client.logout()
class ChangeModel(object):
    """
    Temporarily change attributes on a model instance.

    The changes *ARE* saved to the database while the context is active;
    the original values are restored (and saved again) on exit.
    """

    def __init__(self, instance, **overrides):
        self.instance = instance
        self.overrides = overrides

    def __enter__(self):
        self.old = {}
        for attr, new_value in self.overrides.items():
            self.old[attr] = getattr(self.instance, attr, NULL)
            setattr(self.instance, attr, new_value)
        self.instance.save()

    def __exit__(self, exc, value, tb):
        for attr in self.overrides.keys():
            previous = self.old[attr]
            if previous is NULL:
                # Attribute did not exist beforehand; remove it again.
                delattr(self.instance, attr)
            else:
                setattr(self.instance, attr, previous)
        self.instance.save()
class _AssertNumQueriesContext(object):
    """Context manager asserting that exactly ``num`` SQL queries are
    executed on ``connection`` inside the block (skipped if the block
    raises)."""

    def __init__(self, test_case, num, connection):
        self.test_case = test_case
        self.num = num
        self.connection = connection

    def __enter__(self):
        # DEBUG must be on for Django to record connection.queries.
        self.old_debug = settings.DEBUG
        settings.DEBUG = True
        self.starting_queries = len(self.connection.queries)
        # Stop request_started from clearing the query log mid-context.
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        settings.DEBUG = self.old_debug
        request_started.connect(reset_queries)
        if exc_type is not None:
            # The block raised; don't pile an assertion failure on top.
            return
        final_queries = len(self.connection.queries)
        executed = final_queries - self.starting_queries
        queries = '\n'.join(
            [q['sql'] for q in self.connection.queries[self.starting_queries:]])
        self.test_case.assertEqual(
            executed, self.num, "%d queries executed, %d expected. Queries executed:\n%s" % (
                executed, self.num, queries
            )
        )
@contextmanager
def disable_logger(logger):
    """Temporarily disable *logger* within a ``with`` block.

    The previous ``disabled`` state is restored even if the body raises.
    """
    old = logger.disabled
    logger.disabled = True
    try:
        yield
    finally:
        logger.disabled = old
|
(output_file, 'w')
def get_blank_query_stats_dict():
    """
    Return a fresh per-query statistics dictionary with every counter
    (click depth, hover depth, documents viewed, cumulative gain) zeroed.
    """
    return dict(click_depth=0, hover_depth=0, docs_viewed=0, cg=0)
def get_cg_value(queryid, rank):
    """
    Given a queryid and a rank, returns the CG for the document at that
    rank based upon the QRELS; 0 if the rank is not present in the SERP.
    """
    serp_path = get_serp_file(queryid)
    with open(serp_path, 'r') as serp_file:
        for row in serp_file:
            if row.startswith('rank'):
                # Skip the CSV header line.
                continue
            fields = row.strip().split(',')
            if fields[0] == str(rank):
                return int(fields[7])
    return 0
for root, dirnames, filenames in os.walk(run_base_dir):
for filename in fnmatch.filter(filenames, '*.log'):
log_filename = os.path.join(root, filename)
log_details = get_log_filename_info(filename)
real_queries = user_queries[log_details['userid']][log_details['topic']]
f = open(log_filename, 'r')
query_counter = 0
curr_depth = 0
sim_query_stats = get_blank_query_stats_dict()
for line in f:
line = line.strip().split()
if line[1] == 'QUERY':
sim_query = ' '.join(line[4:])
real_query = queries[real_queries[query_counter]]
if sim_query == real_query['terms']:
if sim_query_stats['click_depth'] > 0 or sim_query_stats['hover_depth'] > 0: # Was there a previous query?
| sim_queries[strat][threshold][queryid] = sim_query_stats
sim_query_stats = get_blank_query_stats_dict()
curr_depth = 0
strat = log_details['ss']
queryid = real_query[ | 'queryid']
threshold = log_details['threshold']
#print 'QUERY ISSUED {0}-{1} ({2}, t={3})'.format(queryid, real_query['terms'], log_details['ss'], log_details['threshold'])
if strat not in observed_strategies:
observed_strategies[strat] = []
if threshold not in observed_strategies[strat]:
observed_strategies[strat].append(threshold)
if strat not in sim_queries:
sim_queries[strat] = {}
if threshold not in sim_queries[strat]:
sim_queries[strat][threshold] = {}
query_counter += 1
elif line[1] == 'SNIPPET':
#print ' Snippet encountered for {0}'.format(line[5])
curr_depth += 1
sim_query_stats['hover_depth'] = curr_depth
#print ' HD: {0} CD: {1} DV: {2}'.format(sim_query_stats['hover_depth'], sim_query_stats['click_depth'], sim_query_stats['docs_viewed'])
elif line[1] == 'DOC' and line[4] == 'EXAMINING_DOCUMENT':
#print ' Document clicked - {0}'.format(line[5])
sim_query_stats['click_depth'] = curr_depth
sim_query_stats['docs_viewed'] += 1
#print ' HD: {0} CD: {1} DV: {2}'.format(sim_query_stats['hover_depth'], sim_query_stats['click_depth'], sim_query_stats['docs_viewed'])
elif line[1] == 'MARK':
#print 'Marked document {0}'.format(line[4])
#print 'Topic: {0}'.format(log_details['topic'])
#print 'QID: {0}'.format(queryid)
#print 'Current depth: {0}'.format(curr_depth)
#print 'CG: {0}'.format(get_cg_value(queryid, curr_depth))
sim_query_stats['cg'] += get_cg_value(queryid, curr_depth)
#print
#pass
elif line[1] == 'INFO': # We have reached the end of the simulation log file.
sim_queries[strat][threshold][queryid] = sim_query_stats
sim_query_stats = get_blank_query_stats_dict()
curr_depth = 0
break
f.close()
header_str = 'queryid,userid,topic,condition,real_docs_viewed,real_doc_click_depth,real_doc_hover_depth,real_cg,'
for strategy in observed_strategies.keys():
#print strategy
threshold_keys = sim_queries[strategy].keys()
natural_keys = map(float, threshold_keys)
zipped = zip(natural_keys, threshold_keys)
thresholds = sorted(zipped, key=lambda x:x[0])
for threshold in thresholds:
#print threshold
threshold = threshold[1]
header_str = '{0}{1},'.format(header_str, '{0}-{1}-doc_click_depth'.format(strategy, threshold))
header_str = '{0}{1},'.format(header_str, '{0}-{1}-doc_hover_depth'.format(strategy, threshold))
header_str = '{0}{1},'.format(header_str, '{0}-{1}-docs_viewed'.format(strategy, threshold))
header_str = '{0}{1},'.format(header_str, '{0}-{1}-cg'.format(strategy, threshold))
#print
header_str = header_str[:-1]
output_file.write('{0}{1}'.format(header_str, os.linesep))
for queryid in sorted(map(int, queries.keys())):
queryid = str(queryid)
query_line = '{0},{1},{2},{3},{4},{5},{6},{7},'.format(queryid,
queries[queryid]['userid'],
queries[queryid]['topic'],
queries[queryid]['condition'],
queries[queryid]['real_docs_viewed'],
queries[queryid]['real_doc_click_depth'],
queries[queryid]['real_doc_hover_depth'],
queries[queryid]['real_cg'])
for strategy in observed_strategies.keys():
threshold_keys = sim_queries[strategy].keys()
natural_keys = map(float, threshold_keys)
zipped = zip(natural_keys, threshold_keys)
thresholds = sorted(zipped, key=lambda x:x[0])
for threshold in thresholds:
threshold = threshold[1]
if queryid not in sim_queries[strategy][threshold]:
query_line = '{0}0,0,0,0,'.format(query_line)
else:
query_line = '{0}{1},{2},{3},{4},'.format(query_line,
sim_queries[strategy][threshold][queryid]['click_depth'],
sim_queries[strategy][threshold][queryid]['hover_depth'],
sim_queries[strategy][threshold][queryid]['docs_viewed'],
sim_queries[strategy][threshold][queryid]['cg'],)
query_line = query_line[:-1]
output_file.write('{0}{1}'.format(query_line, os.linesep))
output_file.close()
return 0
def usage(script_name):
"""
Prints the usage to the terminal, returning an error code.
"""
print "Usage: {0} <query_details_file> <per_query_stats_file> <run_base_dir> <output_file>".format(script_name)
|
import sys
from stack import Stack
def parse_expression_into_parts(expression):
    """
    Split *expression* into a list of tokens.

    :rtype : list
    :param expression: str # i.e. "2 * 3 + ( 2 - 3 )"
    """
    # Exercise stub: intentionally left unimplemented.
    raise NotImplementedError("complete me!")
def evaluate_expression(a, b, op):
    """Apply binary operator *op* to operands *a* and *b*.

    Exercise stub: intentionally left unimplemented.
    """
    raise NotImplementedError("complete me!")
def evaluate_postfix(parts):
    """Evaluate a postfix token list.

    Exercise stub: intentionally left unimplemented.
    """
    raise NotImplementedError("complete me!")
if __name__ == "__main__":
    expr = None
    if len(sys.argv) > 1:
        # An expression was supplied on the command line: evaluate it.
        expr = sys.argv[1]
        parts = parse_expression_into_parts(expr)
        print "Evaluating %s == %s" % (expr, evaluate_postfix(parts))
    else:
        # No argument: show usage (Python 2 print statements).
        print 'Usage: python postfix.py "<expr>" -- i.e. python postfix.py "9 1 3 + 2 * -"'
        print "Spaces are required between every term."
#!/usr/bin/env python
"""Test registry for builders."""
# These need to register plugins so, pylint: disable=unused-import
from grr.lib.builders import signing_test
# pylint: enable=unused-import
|
fr | om .p | olygon import *
|
e[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
    # partial_fit must keep working after a full fit in the multiclass
    # case. Non-regression test for #2496: fit used to produce a
    # Fortran-ordered coef_ that a subsequent partial_fit choked on.
    model = self.factory()
    model.fit(X2, Y2)
    model.partial_fit(X2, Y2)  # must not raise
def _test_partial_fit_equal_fit(self, lr):
    # One fit with n_iter=2 must match two manual partial_fit passes for
    # the given learning-rate schedule (shuffle disabled so both runs see
    # the data in the same order).
    for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
        clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
                           learning_rate=lr, shuffle=False)
        clf.fit(X_, Y_)
        y_pred = clf.decision_function(T_)
        t = clf.t_
        classes = np.unique(Y_)
        clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
                           shuffle=False)
        for i in range(2):
            clf.partial_fit(X_, Y_, classes=classes)
        y_pred2 = clf.decision_function(T_)
        # t_ counts weight updates, so it must agree exactly; predictions
        # only to 2 decimals (float accumulation differs slightly).
        assert_equal(clf.t_, t)
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
    # partial_fit == fit under the constant learning-rate schedule.
    self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
    # partial_fit == fit under the optimal learning-rate schedule.
    self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
    # partial_fit == fit under the inverse-scaling learning-rate schedule.
    self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
    # Each regression loss must still classify the simple dataset
    # perfectly; the per-loss hyper-parameters mirror the originals.
    for extra in (
            dict(learning_rate="constant", eta0=0.1,
                 loss="epsilon_insensitive"),
            dict(learning_rate="constant", eta0=0.1,
                 loss="squared_epsilon_insensitive"),
            dict(loss="huber"),
            dict(learning_rate="constant", eta0=0.01,
                 loss="squared_loss"),
    ):
        model = self.factory(alpha=0.01, **extra)
        model.fit(X, Y)
        assert_equal(1.0, np.mean(model.predict(X) == Y))
def test_warm_start_multiclass(self):
    # Warm starting must also work with multiclass data.
    self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
    # fit must accept consecutive calls with differently shaped inputs.
    model = self.factory(alpha=0.01, n_iter=5,
                         shuffle=False)
    model.fit(X, Y)
    assert_true(hasattr(model, "coef_"))
    # Non-regression: refit with a different label set and one less column.
    labels = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
    model.fit(X[:, :-1], labels)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
    """Re-run the whole dense classifier test suite against the sparse
    representation variant; only the factory changes."""
    factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
    # Smoke test: on perfectly symmetric data the two coefficients
    # must come out equal.
    model = self.factory(alpha=0.1, n_iter=2,
                         fit_intercept=False)
    model.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    assert_equal(model.coef_[0], model.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
    # Constructing with an unknown penalty must raise ValueError.
    self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
    # Constructing with an unknown loss must raise ValueError.
    self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
    # The averaged regressor must match the naive ASGD reference
    # implementation (self.asgd) to near machine precision.
    eta = .001
    alpha = .01
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)

    # simple linear function without noise
    y = np.dot(X, w)

    clf = self.factory(loss='squared_loss',
                       learning_rate='constant',
                       eta0=eta, alpha=alpha,
                       fit_intercept=True,
                       n_iter=1, average=True, shuffle=False)

    clf.fit(X, y)
    average_weights, average_intercept = self.asgd(X, y, eta, alpha)

    assert_array_almost_equal(clf.coef_,
                              average_weights,
                              decimal=16)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
    # Two partial_fit calls over the halves of the data must yield the
    # same averaged solution as the ASGD reference on the full data.
    eta = .001
    alpha = .01
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)

    # simple linear function without noise
    y = np.dot(X, w)

    clf = self.factory(loss='squared_loss',
                       learning_rate='constant',
                       eta0=eta, alpha=alpha,
                       fit_intercept=True,
                       n_iter=1, average=True, shuffle=False)

    clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
    clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
    average_weights, average_intercept = self.asgd(X, y, eta, alpha)

    assert_array_almost_equal(clf.coef_,
                              average_weights,
                              decimal=16)
    assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
    # Averaging must also be correct on data containing zeros
    # (X3/Y3 fixture); compare against the ASGD reference.
    eta = .001
    alpha = .01
    clf = self.factory(loss='squared_loss',
                       learning_rate='constant',
                       eta0=eta, alpha=alpha,
                       fit_intercept=True,
                       n_iter=1, average=True, shuffle=False)

    n_samples = Y3.shape[0]

    clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
    clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
    average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)

    assert_array_almost_equal(clf.coef_,
                              average_weights,
                              decimal=16)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
    xmin, xmax = -5, 5
    n_samples = 100
    rng = np.random.RandomState(0)
    inputs = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

    # Noiseless linear target: the fit should be near perfect.
    targets = 0.5 * inputs.ravel()

    model = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                         fit_intercept=False)
    model.fit(inputs, targets)
    score = model.score(inputs, targets)
    assert_greater(score, 0.99)

    # Same linear target with Gaussian noise: fit should still be decent.
    targets = 0.5 * inputs.ravel() + rng.randn(n_samples, 1).ravel()

    model = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                         fit_intercept=False)
    model.fit(inputs, targets)
    score = model.score(inputs, targets)
    assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple l |
import json
import os
from flask import request, g, render_template, make_response, jsonify, Response
from helpers.raw_endpoint import get_id, store_json_to_file
from helpers.groups import get_groups
from json_controller import JSONController
from main import app
from pymongo import MongoClient, errors
HERE = os.path.dirname(os.path.abspath(__file__))
# setup database connection
def connect_client():
    """Return a ``MongoClient`` for the configured host and port.

    :raises errors.ConnectionFailure: if the connection cannot be made.
    """
    try:
        return MongoClient(app.config['DB_HOST'], int(app.config['DB_PORT']))
    except errors.ConnectionFailure:
        # Bare raise preserves the original traceback; 'raise e' would
        # rebase it on this frame.
        raise
def get_db():
    """Connects to Mongo database"""
    # Lazily create one client per application context and cache the
    # handles on flask.g so repeated calls reuse the same connection.
    if not hasattr(g, 'mongo_client'):
        g.mongo_client = connect_client()
        g.mongo_db = getattr(g.mongo_client, app.config['DB_NAME'])
        # NOTE(review): the collection name comes from the environment,
        # unlike host/port which come from app.config -- confirm this
        # asymmetry is intended.
        g.groups_collection = g.mongo_db[os.environ.get('DB_GROUPS_COLLECTION')]
    return g.mongo_db
@app.teardown_appcontext
def close_db(error):
    """Closes connection with Mongo client when the app context is torn down."""
    if hasattr(g, 'mongo_client'):
        g.mongo_client.close()
# Begin view routes
@app.route('/')
@app.route('/index/')
def index():
    """Serve the SciNet landing page."""
    page = render_template("index.html")
    return page
@app.route('/faq/')
def faq():
    """Serve the SciNet FAQ page."""
    page = render_template("faq.html")
    return page
@app.route('/leaderboard/')
def leaderboard():
    """Render the leaderboard populated with all known groups."""
    get_db()  # ensures g.groups_collection is populated
    group_list = get_groups(g.groups_collection)
    return render_template("leaderboard.html", groups=group_list)
@app.route('/ping', methods=['POST'])
def ping_endpoint():
    """API endpoint determines potential article hash exists in db

    :return: status code 204 -- hash not present, continue submission
    :return: status code 201 -- hash already exists, drop submission
    """
    db = get_db()
    target_hash = request.form.get('hash')
    already_known = bool(db.raw.find({'hash': target_hash}).count())
    return Response(status=201 if already_known else 204)
@app.route('/articles')
def ArticleEndpoint():
    """Eventual landing page for searching/retrieving articles"""
    # NOTE(review): the route accepts only GET by default, so this guard
    # presumably never fails -- confirm before removing it.
    if request.method == 'GET':
        return render_template("articles.html")
@app.route('/raw', methods=['POST'])
def raw_endpoint():
    """API endpoint for submitting raw article data

    :return: status code 405 - invalid JSON or invalid request type
    :return: status code 400 - unsupported content-type or invalid publisher
    :return: status code 201 - successful submission
    """
    # Reject anything that is not JSON up front.
    if request.headers['content-type'] != 'application/json':
        return Response(status=400)

    # Ensure the payload is valid JSON.
    try:
        user_submission = json.loads(request.data)
    except ValueError:
        return Response(status=405)

    # Persist the raw payload under a fresh UID before any processing.
    uid = get_id()
    file_path = os.path.join(
        HERE,
        'raw_payloads',
        str(uid)
    )
    store_json_to_file(user_submission, file_path)

    # Hand the submission to the controller and relay its response.
    db = get_db()
    return JSONController(user_submission, db=db, _id=uid).submit()
#@TODO: Implicit or Explicit group additions? Issue #51 comments on the issues page
#@TODO: Add form validation
@app.route('/requestnewgroup/', methods=['POST'])
def request_new_group():
    # Grab submission form data and prepare email message
    data = request.json
    msg = "Someone has request that you add {group_name} to the leaderboard \
    groups. The groups website is {group_website} and the submitter can \
    be reached at {submitter_email}.".format(
        group_name=data['new_group_name'],
        group_website=data['new_group_website'],
        submitter_email=data['submitter_email'])
    # NOTE(review): the email below is disabled (dead string literal), so
    # msg is currently built but never sent -- the endpoint always 200s.
    return Response(status=200)
    '''
    try:
        email(
            subject="SciNet: A new group has been requested",
            fro="no-reply@scinet.osf.io",
            to='harry@scinet.osf.io',
            msg=msg)
        return Response(status=200)
    except:
        return Response(status=500)
    '''
# Error handlers
@app.errorhandler(404)
def not_found(error):
    """Return a JSON body for 404 responses."""
    body = jsonify( { 'error': 'Page Not Found' } )
    return make_response(body, 404)
@app.errorhandler(405)
def method_not_allowed(error):
    """Return a JSON body for 405 responses."""
    body = jsonify( { 'error': 'Method Not Allowed' } )
    return make_response(body, 405)
remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.tests = NUTestsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.test_suite_runs = NUTestSuiteRunsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
    """The name given by the operator to the Test Suite."""
    return self._name

@name.setter
def name(self, value):
    self._name = value
@property
def last_updated_by(self):
    """ID of the user who last updated the object.

    This attribute is named `lastUpdatedBy` in VSD API.
    """
    return self._last_updated_by

@last_updated_by.setter
def last_updated_by(self, value):
    self._last_updated_by = value
@property
def last_updated_date(self):
    """Time stamp when this object was last updated.

    This attribute is named `lastUpdatedDate` in VSD API.
    """
    return self._last_updated_date

@last_updated_date.setter
def last_updated_date(self, value):
    self._last_updated_date = value
@property
def description(self):
    """An operator given description of the Test Suite."""
    return self._description

@description.setter
def description(self, value):
    self._description = value
@property
def embedded_metadata(self):
    """ Get embedded_metadata value.

        Notes:
            Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

            This attribute is named `embeddedMetadata` in VSD API.

    """
    return self._embedded_metadata

@embedded_metadata.setter
def embedded_metadata(self, value):
    """ Set embedded_metadata value.

        Notes:
            Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

            This attribute is named `embeddedMetadata` in VSD API.

    """
    self._embedded_metadata = value
@property
def underlay_test(self):
    """Flag to define if this Test Suite is the internal 'Underlay Tests'
    Test Suite.

    This attribute is named `underlayTest` in VSD API.
    """
    return self._underlay_test

@underlay_test.setter
def underlay_test(self, value):
    self._underlay_test = value
@property
def enterprise_id(self):
    """The ID of the Enterprise to which this Test Suite belongs to.

    This attribute is named `enterpriseID` in VSD API.
    """
    return self._enterprise_id

@enterprise_id.setter
def enterprise_id(self, value):
    self._enterprise_id = value
@property
def entity_scope(self):
    """Specify if scope of entity is Data center or Enterprise level.

    This attribute is named `entityScope` in VSD API.
    """
    return self._entity_scope

@entity_scope.setter
def entity_scope(self, value):
    self._entity_scope = value
@property
def creation_date(self):
    """Time stamp when this object was created.

    This attribute is named `creationDate` in VSD API.
    """
    return self._creation_date

@creation_date.setter
def creation_date(self, value):
    self._creation_date = value
@property
def owner(self):
    """Identifies the user that has created this object."""
    return self._owner

@owner.setter
def owner(self, value):
    self._owner = value
@property
def external_id(self):
    """External object ID. Used for integration with third party systems.

    This attribute is named `externalID` in VSD API.
    """
    return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This |
):
new_index -= len(self.selection_types)
while new_index < 0:
new_index += len(self.selection_types)
self._selection_index = new_index
@property
def selection_type(self):
    """:obj:`str` : The current selection type in :attr:`selection_types`
    determined by :attr:`selection_index`
    """
    index = self.selection_index
    return self.selection_types[index]
@property
def zoom(self):
    """:obj:`int` : Zoom factor for the pan

    The zoom factor determines the width and height of the pan area. For
    example, if ``zoom=2``, then the width would be half the image width
    and the height would be half the image height. Setting the zoom will
    adjust the pan size in the views.
    """
    return self._zoom

@zoom.setter
def zoom(self, new_zoom):
    # Clamp to a minimum of 1x (cannot zoom out past the full image).
    if new_zoom < 1.0:
        new_zoom = 1.0
    self._zoom = float(new_zoom)
    # Propagate the new pan size to every registered view.
    for view in self._views:
        view.adjust_pan_size()
@property
def x_radius(self):
    """:obj:`float` : Half the image width"""
    half_width = self.shape[1] / 2
    return half_width
@property
def y_radius(self):
    """:obj:`float` : Half the image height"""
    half_height = self.shape[0] / 2
    return half_height
@property
def pan_width(self):
    """:obj:`float` : Width of the pan area"""
    return self.x_radius / self.zoom
@property
def pan_height(self):
    """:obj:`float` : Height of the pan area"""
    radius = self.y_radius
    return radius / self.zoom
def reset_center(self):
    """Reset the pan to the center of the image"""
    center = self.current_image.get_center()
    self._center = center
def _point_is_in_image(self, point):
"""Determine if the point is in the image
Parameters
----------
point : :obj:`tuple` of two :obj:`int`
Tuple with x and y coordinates
Returns
-------
is_in_image : :obj:`bool`
True if the point is in the image. False otherwise.
"""
data_x, data_y = point
height, width = self.shape[:2]
in_width = -0.5 <= data_x <= (width + 0.5)
in_height = -0.5 <= data_y <= (height + 0.5)
is_in_image = in_width and in_height
return is_in_image
def _determine_center_x(self, x):
"""Determine the x coordinate of center of the pan
This method makes sure the pan doesn't go out of the left or right
sides of the image
Parameters
----------
x : :obj:`float`
The x coordinate to determine the center x coordinate
Returns
-------
x_center : :obj:`float`
The x coordinate of the center of the pan
"""
width = self.shape[1]
left_of_left_edge = x - self.pan_width < 0
right_of_right_edge = x + self.pan_width > (width)
in_width = not left_of_left_edge and not right_of_right_edge
if in_width:
center_x = x
elif left_of_left_edge:
center_x = self.pan_width
elif right_of_right_edge:
center_x = width - self.pan_width
return center_x
def _determine_center_y(self, y):
"""Determine the y coordinate of center of the pan
This method makes sure the pan doesn't go out of the top or bottom
sides of the image
Parameters
----------
y : :obj:`float`
The y coordinate to determine the center y coordinate
Returns
-------
center_y : :obj:`float`
The y coordinate of the center of the pan
"""
height = self.shape[0]
below_bottom = y - self.pan_height < -0.5
above_top = y + self.pan_height > (height + 0.5)
in_height = not below_bottom and not above_top
if in_height:
center_y = y
elif below_bottom:
center_y = self.pan_height
elif above_top:
center_y = height - self.pan_height
return center_y
    @property
    def center(self):
        """:obj:`tuple` of two :obj:`float` : x and y coordinate of the center
        of the pan.
        Setting the center will move the pan to the new center. The center
        points cannot result in the pan being out of the image. If they are
        they will be changed so the pan only goes to the edge.
        """
        # Lazily initialize from the current image center on first access.
        if self._center is None:
            self.reset_center()
        return self._center
    @center.setter
    def center(self, new_center):
        # Points outside the image are silently ignored: no move, no
        # view update.
        if self._point_is_in_image(new_center):
            x, y = new_center
            # Clamp each axis independently so the pan stays in bounds.
            center = (
                self._determine_center_x(x), self._determine_center_y(y),
            )
            self._center = center
            for view in self._views:
                view.move_pan()
    @property
    def all_rois_coordinates(self):
        """:obj:`tuple` of two :class:`numpy.ndarray` : Coordinates of where
        there is a pixel selected in a ROI
        """
        # A pixel belongs to some ROI when any of its channels is non-zero.
        return np.where((self._roi_data != 0).any(axis=2))
    @property
    def alpha255(self):
        """:obj:`float` The alpha value normalized between 0 and 255"""
        return self._alpha * 255.
    @property
    def alpha(self):
        """:obj:`float` : The alpha value between 0 and 1
        Setting the alpha value will change the opacity of all the ROIs and
        then set the data in the views
        """
        return self._alpha
    @alpha.setter
    def alpha(self, new_alpha):
        self._alpha = new_alpha
        rows, cols = self.all_rois_coordinates
        # Channel 3 is the alpha channel of the ROI overlay.
        self._roi_data[rows, cols, 3] = self.alpha255
        for view in self._views:
            view.change_roi_opacity()
    @property
    def flip_x(self):
        """:obj:`bool` : If True, flip the x axis
        Setting the ``flip_x`` will display the transformation in the views
        """
        return self._flip_x
    @flip_x.setter
    def flip_x(self, new_flip_x):
        self._flip_x = new_flip_x
        # Every view recomputes its display transform.
        for view in self._views:
            view.set_transforms()
    @property
    def flip_y(self):
        """:obj:`bool` : If True, flip the y axis
        Setting the ``flip_y`` will display the transformation in the views
        """
        return self._flip_y
    @flip_y.setter
    def flip_y(self, new_flip_y):
        self._flip_y = new_flip_y
        for view in self._views:
            view.set_transforms()
    @property
    def swap_xy(self):
        """:obj:`bool` : If True, swap the x and y axis
        Setting the ``swap_xy`` will display the transformation in the views
        """
        return self._swap_xy
    @swap_xy.setter
    def swap_xy(self, new_swap_xy):
        self._swap_xy = new_swap_xy
        for view in self._views:
            view.set_transforms()
    @property
    def transforms(self):
        """:obj:`tuple` of :obj:`bool` : the :attr:`flip_x`, :attr:`flip_y`, and
        :attr:`swap_xy` transformations"""
        return self.flip_x, self.flip_y, self.swap_xy
    @property
    def edges(self):
        """:obj:`tuple` of four :obj:`float` : The ``left``, ``bottom``,
        ``right`` and ``top`` edges of the pan
        """
        # Edges are the pan center offset by the pan half-extents, rounded
        # to whole pixels.
        x, y = self.center
        left = int(round(x - self.pan_width))
        right = int(round(x + self.pan_width))
        bottom = int(round(y - self.pan_height))
        top = int(round(y + self.pan_height))
        return left, bottom, right, top
    @property
    def pan_slice(self):
        """:obj:`numpy.s_` : Slice of pan to extract data from an array"""
        # Note the (row, column) => (y, x) order expected by numpy arrays.
        x1, y1, x2, y2 = self.edges
        pan_slice = np.s_[y1:y2:1, x1:x2:1]
        return pan_slice
    @property
    def pan_data(self):
        """:class:`numpy.ndarray` : The data within the pan"""
        return self.current_image.get_data()[self.pan_slice]
    @property
    def pan_roi_data(self):
        """:class:`numpy.ndarray` : The ROI data in the pan"""
        return self._roi_data[self.pan_slice]
def _get_rgb255_from_color(self, color):
"""Get the rgb values normalized between 0 and 255 given a color
Parameters
----------
color : :obj: |
# encoding: utf-8
from djang | o.contrib import admin
# Register your | models here.
|
od(temp))
data.append(numpy.array(columns).transpose())
self.data = numpy.concatenate(data)
def dump(self, f):
"""Dumps the table in csv format
Arguments:
| ``f`` -- the file object to write to
"""
c = csv.writer(f)
c.writerow([self.label, "[%s]" % self.unit_name])
c.writerow(["Temperatures"] + [temp for temp in self.temps])
for key, row in zip(self.keys, self.data):
c.writerow([key] + [value/self.unit for value in row])
class ReactionAnalysis(object):
"""A Reaction analysis object."""
    def __init__(self, kinetic_model, temp_low, temp_high, temp_step=10*kelvin):
        """
        Arguments:
         | ``kinetic_model``  -- A kinetic model object. See
                                 mod:`tamkin.chemmod`.
         | ``temp_low``  -- The lower bound of the temperature interval in
                            Kelvin.
         | ``temp_high``  -- The upper bound of the temperature interval in
                             Kelvin.
        Optional arguments:
         | ``temp_step``  -- The resolution of the temperature grid.
                             [default=10K]
        The rate constants are computed on the specified temperature grid
        and afterwards the kinetic parameters are fitted to these data. All
        the results are stored as attributes of the reaction analysis object
        and can be written to text files (method write_to_file) or plotted
        (methods plot and plot_parameters). The results from multiple
        reactions can be gathered in a single plot when this is desirable.
        The following attributes may be useful:
         | ``A`` and ``Ea``  -- The kinetic parameters in atomic units.
         | ``R2``  -- The Pearson R^2 of the fit.
         | ``temps``  -- An array with the temperature grid in Kelvin
         | ``temps_inv``  -- An array with the inverse temperatures
         | ``rate_consts``  -- the rate constants at the grid points in atomic
                               units
         | ``ln_rate_consts``  -- the logarithm of `the rate constants in
                                  atomic units`
        """
        self.kinetic_model = kinetic_model
        self.temp_low = float(temp_low)
        self.temp_high = float(temp_high)
        self.temp_step = float(temp_step)
        # Round temp_high up to a whole number of steps above temp_low.
        self.temp_high = numpy.ceil((self.temp_high-self.temp_low)/self.temp_step)*self.temp_step+self.temp_low
        # make sure that the final temperature is included
        self.temps = numpy.arange(self.temp_low,self.temp_high+0.5*self.temp_step,self.temp_step,dtype=float)
        self.temps_inv = 1/self.temps
        self.ln_rate_consts = numpy.array([
            self.kinetic_model.rate_constant(temp, do_log=True)
            for temp in self.temps
        ])
        self.rate_consts = numpy.exp(self.ln_rate_consts)
        # Linear least-squares fit of ln(k) = ln(A) - Ea/(kB*T):
        # column 0 fits ln(A), column 1 fits Ea.
        design_matrix = numpy.zeros((len(self.temps),2), float)
        design_matrix[:,0] = 1
        design_matrix[:,1] = -self.temps_inv/boltzmann
        expected_values = self.ln_rate_consts
        if not numpy.isfinite(expected_values).all():
            raise ValueError("non-finite rate constants. check your partition functions for errors.")
        self.hessian = numpy.dot(design_matrix.transpose(), design_matrix)
        # NOTE(review): lstsq returns the residual sum as a length-1 array,
        # so R2 below is array-valued — confirm downstream formatting copes.
        self.parameters, SSE, rank, s = numpy.linalg.lstsq(design_matrix, self.ln_rate_consts, rcond=None)
        SST = ((self.ln_rate_consts - self.ln_rate_consts.mean())**2).sum()
        self.R2 = 1-SSE/SST
        self.A = numpy.exp(self.parameters[0])
        self.Ea = self.parameters[1]
        self.covariance = None # see monte_carlo method
    def dump(self, f):
        """Write the results in text format on screen or to another stream.
        Argument:
         | ``f`` -- the file object to write to.
        """
        # Fitted Arrhenius parameters.
        print("Summary", file=f)
        print("A [%s] = %.5e" % (self.kinetic_model.unit_name, self.A/self.kinetic_model.unit), file=f)
        print("ln(A [a.u.]) = %.2f" % (self.parameters[0]), file=f)
        print("Ea [kJ/mol] = %.2f" % (self.Ea/kjmol), file=f)
        print("R2 (Pearson) = %.2f%%" % (self.R2*100), file=f)
        print(file=f)
        # Error analysis is only available after the monte_carlo method has
        # populated self.covariance (and the related attributes).
        if self.covariance is not None:
            print("Error analysis", file=f)
            print("Number of Monte Carlo iterations = %i" % self.monte_carlo_iter, file=f)
            print("Relative systematic error on the frequencies = %.2f" % self.freq_error, file=f)
            print("Relative systematic error on the energy = %.2f" % self.energy_error, file=f)
            print("Error on A [%s] = %10.5e" % (self.kinetic_model.unit_name, numpy.sqrt(self.covariance[0,0])*self.A/self.kinetic_model.unit), file=f)
            print("Error on ln(A [a.u.]) = %.2f" % numpy.sqrt(self.covariance[0,0]), file=f)
            print("Error on Ea [kJ/mol] = %.2f" % (numpy.sqrt(self.covariance[1,1])/kjmol), file=f)
            print("Parameter correlation = %.2f" % (self.covariance[0,1]/numpy.sqrt(self.covariance[0,0]*self.covariance[1,1])), file=f)
            print(file=f)
        print("Temperature grid",file=f)
        print("T_low [K] = %.1f" % self.temp_low, file=f)
        print("T_high [K] = %.1f" % self.temp_high, file=f)
        print("T_step [K] = %.1f" % self.temp_step, file=f)
        print("Number of temperatures = %i" % len(self.temps), file=f)
        print(file=f)
        if self.kinetic_model.tunneling is not None:
            self.kinetic_model.tunneling.dump(f)
        # Per-temperature free energy change and rate constant table.
        print("Reaction rate constants", file=f)
        print(" T [K] Delta_r F [kJ/mol] k(T) [%s]" % (self.kinetic_model.unit_name), file=f)
        for i in range(len(self.temps)):
            temp = self.temps[i]
            delta_free = self.kinetic_model.free_energy_change(temp)
            print("% 10.2f %8.1f % 10.5e" % (
                temp, delta_free/kjmol, self.rate_consts[i]/self.kinetic_model.unit
            ), file=f)
        print(file=f)
        self.kinetic_model.dump(f)
        print(file=f)
def write_to_file(self, filename):
"""Write the entire analysis to a text file.
One argument:
| ``filename`` -- the file to write the output.
"""
with open(filename, "w") as f:
self.dump(f)
def plot_arrhenius(self, filename=None, label=None, color="red"):
"""Plot the rate constant and the fitted line.
Optional arguments:
| ``filename`` -- When given, the plot is written to that file,
otherwise this plot method can be called multiple
times with different reaction analysis objects to
put all the results in one plot.
| ``label`` -- When multiple fits are put in one figure, this label
is used distinguish between the various results with
a legend.
| ``color`` -- Determines the color of the plotted data points and
line. [default="red"]. Common color names, html codes
and RGB tuples are accepted. (See matplotlib docs for
more info.)
"""
import matplotlib.pyplot as pt
temps_inv_line = numpy.linspace(self.temps_inv.min(),self.temps_inv.max(),100)
ln_rate_consts_line = self.parameters[0] - self.parameters[1]/boltzmann*temps_inv_line
if filename is not None:
pt.clf()
pt.title('Arrhenius plot')
pt.text(
0.05, 0.05,
"A [%s] = %.3e\nEa [kJ/mol] = %.1f\nR^2 [%%] = %.1f" % (
self.kinetic_model.unit_name, self.A/self.kinetic_model.unit,
self.Ea/kjmol, self.R2*100
),
transform=pt.gca().transAxes
)
pt.xlabel("1/T [1/K]")
pt.ylabel("Rate coefficient [%s]" % self.kinetic_model.unit_name)
if label is None:
label_fit = "Fitted line"
label_data = "Computed values"
else:
|
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Dispatch the generic service task to the service-manager module
    detected on the target host (or the one forced via the ``use`` arg)."""

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        ''' handler for package operations '''
        # Avoid the shared mutable default dict() that leaked state
        # between invocations.
        if task_vars is None:
            task_vars = dict()
        # 'use' lets the play force a specific service module.
        module = self._task.args.get('use', 'auto')
        if module == 'auto':
            try:
                module = self._templar.template('{{ansible_service_mgr}}')
            except Exception:
                pass  # could not get it from template!
        if module == 'auto':
            # Fall back to gathering the fact ourselves.
            facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_service_mgr'), task_vars=task_vars)
            self._display.debug("Facts %s" % facts)
            if 'failed' not in facts:
                # facts['ansible_facts'] is a dict: the previous
                # getattr() call could never find the key, so the
                # detected service manager was always discarded.
                module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')
        if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader:
            module = 'service'
        if module != 'auto':
            # run the detected module with the original args, minus 'use'
            new_module_args = self._task.args.copy()
            if 'use' in new_module_args:
                del new_module_args['use']
            self._display.vvvv("Running %s" % module)
            return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars)
        else:
            return {'failed': True, 'msg': 'Could not detect which service manager to use. Try gathering facts or setting the "use" option.'}
|
import json
import re
import subprocess
from django.conf import settings
default_app_config = "peering.apps.PeeringConfig"
def call_irr_as_set_resolver(irr_as_set, address_family=6):
    """
    Call a subprocess to expand the given AS-SET for an IP version.

    Returns the list of prefix entries parsed from bgpq3's JSON output.
    Raises ValueError when bgpq3 exits with a non-zero status.
    """
    prefixes = []
    # Nothing to resolve for an empty AS-SET.
    if not irr_as_set:
        return prefixes
    # Call bgpq3 with arguments to get a JSON result
    command = [
        settings.BGPQ3_PATH,
        "-h",
        settings.BGPQ3_HOST,
        "-S",
        settings.BGPQ3_SOURCES,
        "-{}".format(address_family),
        "-A",
        "-j",
        "-l",
        "prefix_list",
        irr_as_set,
    ]
    # Merge user settings to command line right before the name of the prefix list
    if settings.BGPQ3_ARGS:
        index = len(command) - 3
        command[index:index] = settings.BGPQ3_ARGS[
            "ipv6" if address_family == 6 else "ipv4"
        ]
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    if process.returncode != 0:
        error_log = "bgpq3 exit code is {}".format(process.returncode)
        if err and err.strip():
            # communicate() returns bytes; decode so the error message is
            # readable instead of a b'...' repr.
            error_log += ", stderr: {}".format(err.decode(errors="replace").strip())
        raise ValueError(error_log)
    prefixes.extend(json.loads(out.decode())["prefix_list"])
    return prefixes
def parse_irr_as_set(asn, irr_as_set):
    """
    Validate that an AS-SET is usable and split it into smaller part if it is actually
    composed of several AS-SETs.
    """
    as_sets = []
    # Can't work with empty or whitespace only AS-SET
    if not irr_as_set or not irr_as_set.strip():
        # Fall back to the plain AS number of the peer.
        return ["AS{}".format(asn)]
    # AS-SETs can be separated by slash, comma, ampersand or whitespace.
    unparsed = re.split(r"[/,&\s]", irr_as_set)
    for value in unparsed:
        value = value.strip()
        if not value:
            continue
        for regexp in [
            # Remove registry prefix if any
            r"^(?:{}):[:\s]".format(settings.BGPQ3_SOURCES.replace(",", "|")),
            # Removing "ipv4:" and "ipv6:"
            r"^(?:ipv4|ipv6):",
        ]:
            pattern = re.compile(regexp, flags=re.IGNORECASE)
            value, number_of_subs_made = pattern.subn("", value)
            # If some substitutions have been made, make sure to clean things up
            if number_of_subs_made > 0:
                value = value.strip()
        as_sets.append(value)
    return as_sets
|
"""
Settings validations for the theming app
"""
import os
import six
from django.conf import settings
from dj | ango.core.checks import Error, Tags, register
@register(Tags.compatibility)
def check_comprehensive_theme_settings(app_configs, **kwargs):
    """
    Checks the comprehensive theming theme directory settings.
    Raises compatibility Errors upon:
        - COMPREHENSIVE_THEME_DIRS is not a list
        - theme dir path is not a string
        - theme dir path is not an absolute path
        - path specified in COMPREHENSIVE_THEME_DIRS does not exist
    Returns:
        List of any Errors.
    """
    # Default to False: a missing ENABLE_COMPREHENSIVE_THEMING setting must
    # disable the checks, not raise AttributeError (getattr previously had
    # no default).
    if not getattr(settings, "ENABLE_COMPREHENSIVE_THEMING", False):
        # Only perform checks when comprehensive theming is enabled.
        return []
    errors = []
    # COMPREHENSIVE_THEME_DIR is no longer supported - support has been removed.
    if hasattr(settings, "COMPREHENSIVE_THEME_DIR"):
        theme_dir = settings.COMPREHENSIVE_THEME_DIR
        errors.append(
            Error(
                "COMPREHENSIVE_THEME_DIR setting has been removed in favor of COMPREHENSIVE_THEME_DIRS.",
                hint='Transfer the COMPREHENSIVE_THEME_DIR value to COMPREHENSIVE_THEME_DIRS.',
                obj=theme_dir,
                id='openedx.core.djangoapps.theming.E001',
            )
        )
    if hasattr(settings, "COMPREHENSIVE_THEME_DIRS"):
        theme_dirs = settings.COMPREHENSIVE_THEME_DIRS
        if not isinstance(theme_dirs, list):
            errors.append(
                Error(
                    "COMPREHENSIVE_THEME_DIRS must be a list.",
                    obj=theme_dirs,
                    id='openedx.core.djangoapps.theming.E004',
                )
            )
        # Generator expressions avoid building throwaway lists.
        if not all(isinstance(theme_dir, six.string_types) for theme_dir in theme_dirs):
            errors.append(
                Error(
                    "COMPREHENSIVE_THEME_DIRS must contain only strings.",
                    obj=theme_dirs,
                    id='openedx.core.djangoapps.theming.E005',
                )
            )
        if not all(theme_dir.startswith("/") for theme_dir in theme_dirs):
            errors.append(
                Error(
                    "COMPREHENSIVE_THEME_DIRS must contain only absolute paths to themes dirs.",
                    obj=theme_dirs,
                    id='openedx.core.djangoapps.theming.E006',
                )
            )
        if not all(os.path.isdir(theme_dir) for theme_dir in theme_dirs):
            errors.append(
                Error(
                    "COMPREHENSIVE_THEME_DIRS must contain valid paths.",
                    obj=theme_dirs,
                    id='openedx.core.djangoapps.theming.E007',
                )
            )
    return errors
|
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
Features class used for Computer Vision algorithms.
"""
from .library import *
from .array import *
import numbers
class Features(object):
    """
    A container class used for various feature detectors.
    Parameters
    ----------
    num: optional: int. default: 0.
        Specifies the number of features.
    """
    def __init__(self, num=0):
        # Opaque handle to the underlying ArrayFire features object.
        self.feat = c_void_ptr_t(0)
        if num is not None:
            # NOTE(review): assert is stripped under -O; validation only.
            assert(isinstance(num, numbers.Number))
            safe_call(backend.get().af_create_features(c_pointer(self.feat), c_dim_t(num)))
    def __del__(self):
        """
        Release features' memory
        """
        # Guard against double-free: the handle is cleared after release.
        if self.feat:
            backend.get().af_release_features(self.feat)
            self.feat = None
    def num_features(self):
        """
        Returns the number of features detected.
        """
        # NOTE(review): returns the raw c_dim_t, not a Python int;
        # callers may need .value.
        num = c_dim_t(0)
        safe_call(backend.get().af_get_features_num(c_pointer(num), self.feat))
        return num
    def get_xpos(self):
        """
        Returns the x-positions of the features detected.
        """
        out = Array()
        safe_call(backend.get().af_get_features_xpos(c_pointer(out.arr), self.feat))
        return out
    def get_ypos(self):
        """
        Returns the y-positions of the features detected.
        """
        out = Array()
        safe_call(backend.get().af_get_features_ypos(c_pointer(out.arr), self.feat))
        return out
    def get_score(self):
        """
        Returns the scores of the features detected.
        """
        out = Array()
        safe_call(backend.get().af_get_features_score(c_pointer(out.arr), self.feat))
        return out
    def get_orientation(self):
        """
        Returns the orientations of the features detected.
        """
        out = Array()
        safe_call(backend.get().af_get_features_orientation(c_pointer(out.arr), self.feat))
        return out
    def get_size(self):
        """
        Returns the sizes of the features detected.
        """
        out = Array()
        safe_call(backend.get().af_get_features_size(c_pointer(out.arr), self.feat))
        return out
|
from rpython.flowspace.model import *
import py
def test_mingraph():
    # Smallest valid graph: start block returns a constant.
    g = FunctionGraph("g", Block([]))
    g.startblock.closeblock(Link([Constant(1)], g.returnblock))
    checkgraph(g)
def template():
    # NOTE(review): not collected by pytest (no test_ prefix); as written
    # it would fail, since checkgraph(g) succeeds on the line above the
    # raises() assertion. Kept as a copy-paste template for new tests.
    g = FunctionGraph("g", Block([]))
    g.startblock.closeblock(Link([Constant(1)], g.returnblock))
    checkgraph(g)
    py.test.raises(AssertionError, checkgraph, g)
def test_exitlessblocknotexitblock():
    # A non-exit block without exits must be rejected.
    g = FunctionGraph("g", Block([]))
    py.test.raises(AssertionError, checkgraph, g)
def test_nonvariableinputarg():
    # Block input args must be Variables, not Constants.
    b = Block([Constant(1)])
    g = FunctionGraph("g", b)
    g.startblock.closeblock(Link([Constant(1)], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
def test_multiplydefinedvars():
    # The same Variable may not appear twice as an input arg ...
    v = Variable()
    g = FunctionGraph("g", Block([v, v]))
    g.startblock.closeblock(Link([v], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
    # ... nor be both an input arg and an operation result.
    v = Variable()
    b = Block([v])
    b.operations.append(SpaceOperation("add", [Constant(1), Constant(2)], v))
    g = FunctionGraph("g", b)
    g.startblock.closeblock(Link([v], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
def test_varinmorethanoneblock():
    # A Variable defined in one block may not be an input arg of another.
    v = Variable()
    g = FunctionGraph("g", Block([]))
    g.startblock.operations.append(SpaceOperation("pos", [Constant(1)], v))
    b = Block([v])
    g.startblock.closeblock(Link([v], b))
    b.closeblock(Link([v], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
def test_useundefinedvar():
    # Using a Variable that is never defined: as a link argument ...
    v = Variable()
    g = FunctionGraph("g", Block([]))
    g.startblock.closeblock(Link([v], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
    # ... or as an exitswitch.
    v = Variable()
    g = FunctionGraph("g", Block([]))
    g.startblock.exitswitch = v
    g.startblock.closeblock(Link([Constant(1)], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
def test_invalid_arg():
    # Operation arguments must be Variables/Constants, not plain ints.
    v = Variable()
    g = FunctionGraph("g", Block([]))
    g.startblock.operations.append(SpaceOperation("pos", [1], v))
    g.startblock.closeblock(Link([v], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
def test_invalid_links():
    # Two exits require an exitswitch.
    g = FunctionGraph("g", Block([]))
    g.startblock.closeblock(Link([Constant(1)], g.returnblock), Link([Constant(1)], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
    # Duplicate exitcases on a boolean switch are invalid.
    v = Variable()
    g = FunctionGraph("g", Block([v]))
    g.startblock.exitswitch = v
    g.startblock.closeblock(Link([Constant(1)], g.returnblock, True),
                            Link([Constant(1)], g.returnblock, True))
    py.test.raises(AssertionError, checkgraph, g)
    # A switch with a single uncased exit is invalid.
    v = Variable()
    g = FunctionGraph("g", Block([v]))
    g.startblock.exitswitch = v
    g.startblock.closeblock(Link([Constant(1)], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
|
.append(a)
return ' '.join(new_args)
def format_max_vmem(vmem, fields):
    """Format a virtual-memory byte count as megabytes, e.g. '1.00M'."""
    megabytes = (vmem or 0) / (1024.0 * 1024)
    return "%.2fM" % megabytes
def format_hostname(hostname, fields):
    """Strip the labs domain suffix from a hostname, if present."""
    suffix = '.eqiad.wmflabs'
    return hostname[:-len(suffix)] if hostname.endswith(suffix) else hostname
def format_job_id_job(job_id, fields):
    # Render a job id as a link for the job table (helper defined elsewhere).
    return format_job_table_anchor(job_id)
def format_job_id_accounting(job_id, fields):
    # Render a job id as a link for the accounting table.
    return format_accounting_table_anchor(job_id)
def format_time(t, fields):
    """Format a duration with two decimals; falsy values pass through str()."""
    if not t:
        return str(t)
    return "%.2f" % t
# fields displayed by cmd=status: [0] is the database field name, [1] is
# the <th> label, [2] is an optional formatter function; the default
# formatter is str(data).
job_table_field = [
    ('job_id', 'job id', format_job_id_job),
    ('job_state', 'job state'),
    ('sge_jobnumber', 'sge job id', format_sge_jobnumber_job),
    ('job_run_cmd', 'cmd', format_command),
    ('job_args', 'args', format_args),
    ('job_submit_time', 'submit time (UTC)', format_timestamp),
]
# Same layout for the accounting (finished jobs) table.
accounting_table_field = [
    ('job_id', 'job id', format_job_id_accounting),
    ('sge_jobnumber', 'sge job id', format_sge_jobnumber_accounting),
    ('sge_hostname', 'host name', format_hostname),
    ('sge_qsub_time', 'submit at', format_timestamp),
    ('sge_start_time', 'start at', format_timestamp),
    ('sge_end_time', 'end at', format_timestamp),
    ('sge_failed', 'failed'),
    ('sge_exit_status', 'exit status'),
    ('sge_ru_utime', 'utime', format_time),
    ('sge_ru_stime', 'stime', format_time),
    ('sge_ru_wallclock', 'wallclock'),
    ('sge_used_maxvmem', 'max vmem', format_max_vmem),
]
def query_params(environ):
    """Parse request parameters from a WSGI environ into a dict.

    Defaults are pre-filled; when a parameter is repeated, the last
    value wins.
    """
    import cgi
    field = cgi.FieldStorage(environ['wsgi.input'])
    rdict = {
        'format' : 'html',
        'cmd' : 'status',
        'filter' : '',
        'book' : '',
        'lang' : ''
    }
    for name in field:
        # isinstance(..., list) replaces the Python-2-only
        # types.ListType alias; same behavior, version-safe.
        if isinstance(field[name], list):
            rdict[name] = field[name][-1].value
        else:
            rdict[name] = field[name].value
    return rdict
def handle_ping(start_response):
    # pseudo ping: we run on the web server itself, so always report 1 ms.
    payload = { 'error' : 0,
                'text' : 'pong',
                'server' : 'hocr',
                'ping' : 0.001
                }
    text = json.dumps(payload)
    headers = [('Content-Type',
                'text/plain; charset=UTF-8'),
               ('Content-Length', len(text)),
               ('Access-Control-Allow-Origin', '*')]
    start_response('200 OK', headers)
    return [ text ]
def get_int_param(params, name, default, max_val = None):
    """Fetch an integer parameter with an optional upper bound.

    | ``params`` -- mapping of parameter names to raw values
    | ``name`` -- the parameter to look up
    | ``default`` -- returned when the value is missing or not an integer
    | ``max_val`` -- if truthy, cap the result at this value
    """
    try:
        result = params.get(name, default)
        result = int(result)
        if max_val:
            result = min(result, max_val)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only int-conversion failures fall
        # back to the default; unrelated errors now propagate.
        result = default
    return result
def table_header(fields):
    """Build the <tr> header row from a list of field tuples (label at [1])."""
    cells = ''.join(' <th>' + f[1] + '</th>\n' for f in fields)
    return ' <tr>\n' + cells + ' </tr>\n'
def to_html(data, fields):
    """Render one HTML table row for *data*.

    | ``data`` -- dict of values keyed by database field name
    | ``fields`` -- list of (db_name, label[, formatter]) tuples; the
                    optional formatter is called as formatter(value, data)
    """
    text = ' <tr>\n'
    for f in fields:
        if f[0] in data:
            text += ' <td>'
            if len(f) >= 3:
                # Custom formatter receives the value and the whole row.
                text += str(f[2](data[f[0]], data))
            else:
                text += str(data[f[0]])
            text += '</td>\n'
        else:
            # Typo fix: the cell used to read 'Unknow field'.
            text += '<td>Unknown field</td>'
    text += ' </tr>\n'
    return text
def prev_next_link(prev, has_next, state_filter, limit, offset, default_limit):
    """Build a Prev/Next pagination link, or a plain label when disabled.

    | ``prev`` -- True for the 'Prev' link, False for 'Next'
    | ``has_next`` -- whether a further page exists (Next only)
    | ``state_filter``/``limit``/``offset`` -- current query parameters
    | ``default_limit`` -- limit value omitted from the generated URL
    """
    href = False
    if prev:
        label = 'Prev'
        # No link on the first page (offset 0).
        if offset:
            new_offset = max(offset - limit, 0)
            href = True
    else:
        label = 'Next'
        if has_next:
            new_offset = offset + limit
            href = True
    if href:
        link = '<a href="?cmd=status&filter=%s' % state_filter
        # offset/limit are only added when they differ from the defaults.
        if new_offset:
            link += "&offset=%d" % new_offset
        if limit != default_limit:
            link += "&limit=%d" % limit
        link += '">' + label + '</a>'
    else:
        link = label
    return link
def job_table(db_obj, state_filter, limit, offset, default_limit, max_limit):
    """Render the pending/running job table as HTML.

    Returns (html_text, rows) so the caller can reuse the rows for the
    accounting table.
    """
    data, has_next = db_obj.get_job_table(state_filter, limit, offset)
    link_prev = prev_next_link(True, has_next, state_filter, limit,
                               offset, default_limit)
    link_next = prev_next_link(False, has_next, state_filter, limit,
                               offset, default_limit)
    text = link_prev + ' ' + link_next + '\n'
    text += '<table class="wikitable" style="text-align:right;margin-left:auto;margin-right:auto;">\n'
    # NOTE(review): 'global' is unnecessary for a read-only module-level
    # lookup; kept as-is.
    global job_table_field
    text += table_header(job_table_field)
    for d in data:
        text += to_html(d, job_table_field)
    text += '</table>\n'
    return text, data
def accounting_table(db_obj, jobs, state_filter,
                     limit, offset, default_limit, max_limit):
    """Render the accounting table (finished jobs) as HTML for the given
    job rows."""
    job_ids = [ x['job_id'] for x in jobs ]
    # FIXME: offset/limit not correct, we must have separate offset/limit
    # than the job table offset/limit.
    data, has_next = db_obj.get_accounting_table(limit, 0, job_ids)
    global accounting_table_field
    link_prev = prev_next_link(True, has_next, state_filter, limit,
                               offset, default_limit)
    link_next = prev_next_link(False, has_next, state_filter, limit,
                               offset, default_limit)
    text = link_prev + ' ' + link_next + '\n'
    text += '<table class="wikitable" style="text-align:right;margin-left:auto;margin-right:auto;">\n'
    text += table_header(accounting_table_field)
    for d in data:
        text += to_html(d, accounting_table_field)
    text += '</table>\n'
    return text
def handle_status(params, start_response):
    """WSGI handler for cmd=status: emit the job and accounting tables."""
    default_limit = 50
    max_limit = 1000
    state_filter = params.get('filter', '')
    limit = get_int_param(params, 'limit', default_limit, max_limit)
    offset = get_int_param(params, 'offset', 0, None)
    #print >> sys.stderr, params
    db_obj = sge_jobs.DbJob()
    # NOTE(review): get_head() result is encoded to bytes then concatenated
    # with str — this only works on Python 2; confirm before porting.
    text = common_html.get_head('hocr', css = 'shared.css').encode('utf-8') + '\n  <body>\n'
    html, jobs = job_table(db_obj, state_filter, limit, offset,
                           default_limit, max_limit)
    text += html
    # The accounting table is restricted to the jobs shown above.
    text += accounting_table(db_obj, jobs, state_filter, limit, offset,
                             default_limit, max_limit)
    text += '  </body>\n</html>'
    start_response('200 OK', [('Content-Type',
                               'text/html; charset=UTF-8'),
                              ('Content-Length', len(text)),
                              ('Access-Control-Allow-Origin', '*')])
    return [ text ]
def gen_hocr_request(params):
    """Queue an hocr batch job for the requested lang/book via the job DB."""
    job_req = {
        'jobname' : 'hocr',
        'run_cmd' : 'python',
        'args' : [
            os.path.expanduser('~/phe/hocr/hocr.py'),
            '-lang:' + params['lang'],
            '-book:' + params['book']
            ],
        # Memory cap for the grid job, in MB.
        'max_vmem' : 1024,
        }
    db_obj = sge_jobs.DbJob()
    db_obj.add_request(**job_req)
def handle_query(params, start_response):
    """WSGI handler returning hOCR data for a lang/book pair as JSON."""
    # Python 2 print-to-stream syntax: this module targets Python 2.
    print >> sys.stderr, params
    if params['lang'] and params['book']:
        try:
            ret_code = '200 OK'
            result = hocr.get_hocr(params['lang'], params['book'])
        except:
            # Best-effort: any lookup failure is reported as a 500.
            utils.print_traceback()
            ret_code = '500 Internal Server Error'
            result = { 'error' : 1, 'text' : ret_code }
    else:
        ret_code = '400 Bad Request'
        result = { 'error' : 1, 'text' : ret_code }
    try:
        text = json.dumps(result)
    except UnicodeDecodeError:
        # Mis-encoded hOCR payloads cannot be serialized; report and bail.
        print >> sys.stderr, result
        ret_code = '400 Bad Request'
        text = json.dumps({ 'error' : 1, 'text' : ret_code })
    start_response(ret_code, [('Content-Type',
                               'application/json' + '; charset=UTF-8'),
                              ('Content-Length', len(text)),
                              ('Access-Control-Allow-Origin', '*')])
    return [ text ]
def myapp(environ, start_response):
params = query_params(env |
# tool_extrude.py
# Extrusion tool.
# Copyright (c) 2015, Lennart Riecken
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PySide import QtGui, QtCore
from tool import Tool, EventData, MouseButtons, KeyModifiers, Face
from plugin_api import register_plugin
class ExtrudeTool(Tool):
    def __init__(self, api):
        super(ExtrudeTool, self).__init__(api)
        # Create our action / icon
        self.action = QtGui.QAction(QtGui.QPixmap(":/images/gfx/icons/border-bottom-thick.png"), "Extrude", None)
        self.action.setStatusTip("Extude region")
        self.action.setCheckable(True)
        self.action.setShortcut(QtGui.QKeySequence("Ctrl+0"))
        # Register the tool
        self.priority = 10
        self.api.register_tool(self)
        # Area tool helper
        # _mouse: last drag position; _stamp: (x, y, z, col) tuples of the
        # selection captured at drag start.
        self._mouse = None
        self._stamp = []
        # Axis flags and drag state; reset on every drag start.
        self.xdir = True
        self.ydir = True
        self.zdir = True
        self.pastoffset = 0
        self.fixeddirection = False
def drawstamp(self, data, dx, dy, dz):
for x, y, z, col in self._stamp:
tgt = data.voxels.get(x + dx, y + dy, z + dz)
if tgt == 0:
data.voxels.set(x + dx, y + dy, z + dz, col, True, 1)
data.voxels.completeUndoFill()
    def on_drag_start(self, data):
        # Capture the current selection (position + color) as the stamp to
        # extrude.
        if len(data.voxels._selection) > 0:
            self._stamp = []
            for x, y, z in data.voxels._selection:
                col = data.voxels.get(x, y, z)
                self._stamp.append((x, y, z, col))
        # Remember where the drag began so on_drag can compute deltas.
        self._mouse = (data.mouse_x, data.mouse_y)
        # Holding X/Y/Z locks the extrusion to that single axis.
        if QtCore.Qt.Key_X in data.keys:
            self.xdir = True
            self.ydir = False
            self.zdir = False
            self.fixeddirection = True
        elif QtCore.Qt.Key_Y in data.keys:
            self.xdir = False
            self.ydir = True
            self.zdir = False
            self.fixeddirection = True
        elif QtCore.Qt.Key_Z in data.keys:
            self.xdir = False
            self.ydir = False
            self.zdir = True
            self.fixeddirection = True
        else:
            # No modifier: the axis is decided by the first drag movement.
            self.xdir = True
            self.ydir = True
            self.zdir = True
            self.fixeddirection = False
        self.pastoffset = 0
# When dragging, create the selection
def on_drag(self, data):
# In case the first click has missed a valid target.
if self._mouse is None or len(self._stamp) == 0:
return
dx = data.mouse_x - self._mouse[0]
dy = data.mouse_y - self._mouse[1]
# Work out some sort of vague translation between screen and voxels
sx = self.api.mainwindow.width() / data.voxels.width
sy = self.api.mainwindow.height() / data.voxels.height
dx = int(round(dx / float(sx)))
dy = int(round(dy / float(sy)))
if dx == 0 and dy == 0:
return
# Work out translation for x,y
ax, ay = self.api.mainwindow.display.view_axis()
tx = 0
ty = 0
tz = 0
tdx = 0
tdy = 0
tdz = 0
if ax == self.api.mainwindow.display.X_AXIS:
tdx = dx
if dx > 0:
tx = 1
elif dx < 0:
tx = -1
elif ax == self.api.mainwindow.display.Y_AXIS:
tdy = dx
if dx > 0:
ty = 1
elif dx < 0:
ty = -1
elif ax == self.api.mainwindow.display.Z_AXIS:
tdz = dx
if dx > 0:
tz = 1
elif dx < 0:
tz = -1
if ay == self.api.mainwindow.display.X_AXIS:
tdx = dy
if dy > 0:
tx = 1
elif dy < 0:
tx = -1
elif ay == self.api.mainwindow.display.Y_AXIS:
tdy = dy
if dy > 0:
ty = -1
elif dy < 0:
ty = 1
elif ay == self.api.mainwindow.display.Z_AXIS:
tdz = dy
if dy > 0:
tz = 1
elif dy < 0:
tz = -1
if self.fixeddirection:
if self.xdir:
if tx != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += tx
self.drawstamp(data, self.pastoffset, 0, 0)
elif self.ydir:
if ty != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += ty
self.drawstamp(data, 0, self.pastoffset, 0)
elif self.zdir:
if tz != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += tz
self.drawstamp(data, 0, 0, self.pastoffset)
else:
if tx != 0 and self.xdir and (not self.ydir or (abs(tdx) > abs(tdy) and abs(tdx) > abs(tdz))):
self._mouse = (data.mouse_x, data.mouse_y)
self.ydir = False
self.zdir = False
self.pastoffset += tx
self.drawstamp(data, self.pastoffset, 0, 0)
elif ty != 0 and self.ydir and (not self.zdir or abs(tdy) > abs(tdz)):
self._mouse = (data.mouse_x, data.mouse_y)
sel | f.xdir = False
self.zdir = False
self.pastoffset += ty
self. | drawstamp(data, 0, self.pastoffset, 0)
elif tz != 0 and self.zdir:
self._mouse = (data.mouse_x, data.mouse_y)
self.xdir = False
self.ydir = False
self.pastoffset += tz
self.drawstamp(data, 0, 0, self.pastoffset)
def on_drag_end(self, data):
data.voxels.clear_selection()
dx = self.pastoffset if self.xdir else 0
dy = self.pastoffset if self.ydir else 0
dz = self.pastoffset if self.zdir else 0
for x, y, z, col in self._stamp:
data.voxels.select(x + dx, y + dy, z + dz)
register_plugin(ExtrudeTool, "Extrude Tool", "1.0")
|
# Pyperclip v1.4
# A cross-platform clipboard module for Python. (only handles plain text for now)
# By Al Sweigart al@coffeeghost.net
# Usage:
# import pyperclip
# pyperclip.copy('The text to be copied to the clipboard.')
# spam = pyperclip.paste()
# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
# Copyright (c) 2010, Albert Sweigart
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the pyperclip nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Change Log:
# 1.2 Use the platform module to help determine OS.
# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError
import platform, os
def winGetClipboard():
    """Return the Windows clipboard contents (CF_TEXT) via ctypes.

    NOTE(review): reads the clipboard handle through c_char_p without
    GlobalLock; relies on the handle being directly addressable — confirm.
    """
    ctypes.windll.user32.OpenClipboard(0)
    pcontents = ctypes.windll.user32.GetClipboardData(1) # 1 is CF_TEXT
    data = ctypes.c_char_p(pcontents).value
    #ctypes.windll.kernel32.GlobalUnlock(pcontents)
    ctypes.windll.user32.CloseClipboard()
    return data
def winSetClipboard(text):
    """Place `text` on the Windows clipboard as CF_TEXT.

    Allocates a shareable global memory block, copies the text into it and
    hands it to the clipboard.  The bytes()-based try/except lets the same
    code run on both Python 2 and Python 3.
    """
    text = str(text)
    GMEM_DDESHARE = 0x2000
    ctypes.windll.user32.OpenClipboard(0)
    ctypes.windll.user32.EmptyClipboard()
    try:
        # works on Python 2 (bytes() only takes one argument)
        hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)
    except TypeError:
        # works on Python 3 (bytes() requires an encoding)
        hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)
    pchData = ctypes.windll.kernel32.GlobalLock(hCd)
    try:
        # works on Python 2 (bytes() only takes one argument)
        ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
    except TypeError:
        # works on Python 3 (bytes() requires an encoding)
        ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
    ctypes.windll.kernel32.GlobalUnlock(hCd)
    # 1 is CF_TEXT
    ctypes.windll.user32.SetClipboardData(1, hCd)
    ctypes.windll.user32.CloseClipboard()
def macSetClipboard(text):
    """Copy `text` onto the OS X clipboard through the pbcopy command."""
    pipe = os.popen('pbcopy', 'w')
    pipe.write(str(text))
    pipe.close()
def macGetClipboard():
    """Return the OS X clipboard contents through the pbpaste command."""
    pipe = os.popen('pbpaste', 'r')
    try:
        return pipe.read()
    finally:
        pipe.close()
def gtkGetClipboard():
    """Return the clipboard text using a fresh GTK clipboard object."""
    clipboard = gtk.Clipboard()
    return clipboard.wait_for_text()
def gtkSetClipboard(text):
    """Store `text` on the GTK clipboard, keeping it in the module-global cb."""
    global cb
    cb = gtk.Clipboard()
    cb.set_text(str(text))
    cb.store()
def qtGetClipboard():
    """Return the Qt clipboard contents as a plain string."""
    contents = cb.text()
    return str(contents)
def qtSetClipboard(text):
    """Place `text` on the Qt clipboard (module-global cb)."""
    cb.setText(str(text))
def xclipSetClipboard(text):
    """Copy `text` to the X clipboard selection via the xclip command."""
    pipe = os.popen('xclip -selection c', 'w')
    pipe.write(str(text))
    pipe.close()
def xclipGetClipboard():
    """Return the X clipboard selection via the xclip command."""
    pipe = os.popen('xclip -selection c -o', 'r')
    try:
        return pipe.read()
    finally:
        pipe.close()
def xselSetClipboard(text):
    """Copy `text` to the X selection via the xsel command."""
    pipe = os.popen('xsel -i', 'w')
    pipe.write(str(text))
    pipe.close()
def xselGetClipboard():
    """Return the X selection contents via the xsel command."""
    pipe = os.popen('xsel -o', 'r')
    try:
        return pipe.read()
    finally:
        pipe.close()
# Pick a clipboard implementation for the current platform.
if os.name == 'nt' or platform.system() == 'Windows':
    import ctypes
    getcb = winGetClipboard
    setcb = winSetClipboard
elif os.name == 'mac' or platform.system() == 'Darwin':
    getcb = macGetClipboard
    setcb = macSetClipboard
elif os.name == 'posix' or platform.system() == 'Linux':
    xclipExists = os.system('which xclip') == 0
    if xclipExists:
        getcb = xclipGetClipboard
        setcb = xclipSetClipboard
    else:
        xselExists = os.system('which xsel') == 0
        if xselExists:
            getcb = xselGetClipboard
            setcb = xselSetClipboard
        # NOTE(review): gtk/Qt are probed even when xsel was found and,
        # if importable, override the xsel handlers.
        try:
            import gtk
            getcb = gtkGetClipboard
            setcb = gtkSetClipboard
        except Exception:
            try:
                import PyQt4.QtCore
                import PyQt4.QtGui
                # BUG FIX: QApplication lives in PyQt4.QtGui, not in the
                # top-level PyQt4 package; the original raised
                # AttributeError here.
                app = PyQt4.QtGui.QApplication([])
                cb = PyQt4.QtGui.QApplication.clipboard()
                getcb = qtGetClipboard
                setcb = qtSetClipboard
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are not swallowed.
                raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')
copy = setcb
paste = getcb
|
# mhkutil - A utility for dealing with Mohawk archives
#
# mhkutil is the legal property of its developers, whose names
# can be found in the AUTHORS file distributed with this source
# distribution.
#
# mhkutil is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# mhkutil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mhkutil. If not, see <http://www.gnu.org/licenses/>.
import os
import struct
# TODO: Find a better place for this
# TODO: Find a better place for this
def makeTag(text):
    """Convert a 4-byte tag string (e.g. b'MHWK') to its 32-bit BE value.

    :param text: exactly four bytes naming the tag.
    :returns: the tag as an unsigned 32-bit integer.
    :raises ValueError: if `text` is not exactly 4 bytes long.
        (ValueError subclasses Exception, so existing callers catching
        Exception keep working.)
    """
    if len(text) != 4:
        raise ValueError('Invalid text size {0}'.format(len(text)))
    return struct.unpack('>L', text)[0]
# TODO: Find a better place for this
def tagToString(tag):
    """Convert a 32-bit numeric tag back into its 4-byte big-endian form."""
    packed = struct.pack('>L', tag)
    return packed
class Stream:
    """Binary read stream; subclasses must supply read(size) returning bytes."""

    def _unpack(self, fmt, size):
        # Read `size` bytes and decode them with struct format `fmt`.
        return struct.unpack(fmt, self.read(size))[0]

    def readByte(self):
        return self._unpack('B', 1)

    def readSByte(self):
        return self._unpack('b', 1)

    def readUint16LE(self):
        return self._unpack('<H', 2)

    def readSint16LE(self):
        return self._unpack('<h', 2)

    def readUint16BE(self):
        return self._unpack('>H', 2)

    def readSint16BE(self):
        return self._unpack('>h', 2)

    def readUint32LE(self):
        return self._unpack('<L', 4)

    def readSint32LE(self):
        return self._unpack('<l', 4)

    def readUint32BE(self):
        return self._unpack('>L', 4)

    def readSint32BE(self):
        return self._unpack('>l', 4)

    def readCString(self):
        """Read a NUL-terminated string; the terminator is consumed."""
        chars = []
        while True:
            code = self.readByte()
            if code == 0:
                break
            chars.append(chr(code))
        return ''.join(chars)
class WriteStream:
    """Binary write stream; subclasses must supply write(data)."""

    def _pack(self, fmt, value):
        # Encode `value` with struct format `fmt` and emit the bytes.
        self.write(struct.pack(fmt, value))

    def writeByte(self, x):
        self._pack('B', x)

    def writeSByte(self, x):
        self._pack('b', x)

    def writeUint16LE(self, x):
        self._pack('<H', x)

    def writeSint16LE(self, x):
        self._pack('<h', x)

    def writeUint16BE(self, x):
        self._pack('>H', x)

    def writeSint16BE(self, x):
        self._pack('>h', x)

    def writeUint32LE(self, x):
        self._pack('<L', x)

    def writeSint32LE(self, x):
        self._pack('<l', x)

    def writeUint32BE(self, x):
        self._pack('>L', x)

    def writeSint32BE(self, x):
        self._pack('>l', x)
class FileStream(Stream):
    """Read Stream backed by a seekable file handle (binary mode)."""

    def __init__(self, handle):
        self._handle = handle
        # Measure the total size once by seeking to the end, then rewind.
        handle.seek(0, os.SEEK_END)
        self._size = handle.tell()
        handle.seek(0)

    def tell(self):
        """Return the current file position."""
        return self._handle.tell()

    def size(self):
        """Return the total file size in bytes."""
        return self._size

    def seek(self, offset, whence=os.SEEK_SET):
        """Reposition the handle; mirrors file.seek semantics."""
        return self._handle.seek(offset, whence)

    def read(self, size):
        """Read `size` bytes, returned as a mutable bytearray."""
        return bytearray(self._handle.read(size))
class FileWriteStream(WriteStream):
    """WriteStream that forwards packed bytes straight to a file handle."""

    def __init__(self, handle):
        self._handle = handle

    def write(self, x):
        # No buffering: delegate directly to the underlying handle.
        self._handle.write(x)
class ByteStream(Stream):
    """Read Stream over an in-memory bytes/bytearray buffer."""

    def __init__(self, data):
        self._data = data
        self._pos = 0

    def tell(self):
        """Return the current read offset."""
        return self._pos

    def size(self):
        """Return the total buffer length."""
        return len(self._data)

    def seek(self, offset, whence=os.SEEK_SET):
        """Reposition the read offset (SEEK_SET / SEEK_CUR / SEEK_END)."""
        if whence == os.SEEK_CUR:
            base = self._pos
        elif whence == os.SEEK_END:
            base = len(self._data)
        else:
            base = 0
        self._pos = base + offset

    def read(self, size):
        """Return up to `size` bytes (short at end of buffer) and advance."""
        if size == 0:
            return bytearray()
        start = self._pos
        self._pos = start + size
        return self._data[start:self._pos]
|
# - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
class StockInformation(models.Model):
_inherit = 'stock.information'
@api.multi
def _compute_week(self):
    """Extend the weekly stock computation with pending in/out figures.

    For each line this fills: outgoing pending moves/amounts, outgoing
    amounts from reserved procurements, incoming amounts from planned
    procurements (with and without reservations), and the derived
    stock_availability / virtual_stock values.
    """
    super(StockInformation, self)._compute_week()
    p_obj = self.env['procurement.order']
    move_obj = self.env['stock.move']
    for line in self:
        # Pending outgoing moves up to the end of this week; the first
        # week is queried without a lower date bound.
        if line.first_week:
            moves = move_obj._find_moves_from_stock_information(
                line.company, line.last_day_week,
                products=[line.product.id], location_id=line.location,
                periods=False)
        else:
            moves = move_obj._find_moves_from_stock_information(
                line.company, line.last_day_week,
                products=[line.product.id], from_date=line.first_day_week,
                location_id=line.location, periods=False)
        line.outgoing_pending_amount = sum(moves.mapped('product_uom_qty'))
        line.outgoing_pending_moves = [(6, 0, moves.ids)]
        # Outgoing procurements with reservations in these states.
        states = ['confirmed', 'exception']
        if line.first_week:
            procurements = p_obj._find_procurements_from_stock_information(
                line.company, line.last_day_week, states=states,
                products=[line.product.id], location_id=line.location,
                without_reserves=False, without_plan=False)
        else:
            procurements = p_obj._find_procurements_from_stock_information(
                line.company, line.last_day_week, states=states,
                from_date=line.first_day_week, products=[line.product.id],
                location_id=line.location, without_reserves=False,
                without_plan=False)
        line.outgoing_pending_amount_reserv = sum(
            procurements.mapped('product_qty'))
        line.outgoing_pending_procurement_reserv = (
            [(6, 0, procurements.ids)])
        # Total outgoing = pending moves + reserved procurements.
        line.outgoing_pending_amount_moves = line.outgoing_pending_amount
        line.outgoing_pending_amount += line.outgoing_pending_amount_reserv
        states = ['confirmed', 'exception']
        # Incoming procurements from the plan, excluding reservations.
        if line.first_week:
            procurements = p_obj._find_procurements_from_stock_information(
                line.company, line.last_day_week, states=states,
                products=[line.product.id], location_id=line.location,
                without_reserves=True, without_plan=False)
        else:
            procurements = p_obj._find_procurements_from_stock_information(
                line.company, line.last_day_week, states=states,
                from_date=line.first_day_week, products=[line.product.id],
                location_id=line.location, without_reserves=True,
                without_plan=False)
        line.incoming_pending_amount_plan = sum(
            procurements.mapped('product_qty'))
        line.incoming_pending_procurements_plan = (
            [(6, 0, procurements.ids)])
        # Incoming procurements from the plan including reservations.
        # NOTE(review): this query uses the same arguments as the
        # outgoing-reservation one above (without_reserves=False,
        # without_plan=False) — confirm the intended domain.
        if line.first_week:
            procurements = p_obj._find_procurements_from_stock_information(
                line.company, line.last_day_week, states=states,
                products=[line.product.id], location_id=line.location,
                without_reserves=False, without_plan=False)
        else:
            procurements = p_obj._find_procurements_from_stock_information(
                line.company, line.last_day_week, states=states,
                from_date=line.first_day_week, products=[line.product.id],
                location_id=line.location, without_reserves=False,
                without_plan=False)
        line.incoming_pending_amount_plan_reservation = sum(
            procurements.mapped('product_qty'))
        line.incoming_pending_procurements_plan_reservation = (
            [(6, 0, procurements.ids)])
        line.incoming_pending_amount += (
            line.incoming_pending_amount_plan +
            line.incoming_pending_amount_plan_reservation)
        # Availability vs. requirements decides the virtual (missing) stock.
        line.stock_availability = (line.qty_available - line.minimum_rule +
                                   line.incoming_pending_amount)
        if line.stock_availability >= line.outgoing_pending_amount:
            line.virtual_stock = 0
        else:
            line.virtual_stock = (line.outgoing_pending_amount -
                                  line.stock_availability)
# Computed: incoming quantity this week from planned procurements
# (without reservations).
incoming_pending_amount_plan = fields.Float(
    'Incoming pending amount from plan', compute='_compute_week',
    digits=dp.get_precision('Product Unit of Measure'),
    help='Incoming from plan')
# Stored related copy of the field above.
incoming_pending_amount_plan_required_run = fields.Float(
    'Incoming from plan required run',
    related='incoming_pending_amount_plan',
    digits=dp.get_precision('Product Unit of Measure'), store=True)
# Computed m2m: the planned procurements behind the amount above.
incoming_pending_procurements_plan = fields.Many2many(
    comodel_name='procurement.order',
    string='Incoming pending procurements from plan',
    relation='rel_stock_info_incoming_pending_procurement_plan',
    column1='stock_info_id', column2='pending_procurement_plan_id',
    compute='_compute_week')
# Computed: incoming quantity from the plan including reservations.
incoming_pending_amount_plan_reservation = fields.Float(
    'Incoming pending amount from plan reservation',
    digits=dp.get_precision('Product Unit of Measure'),
    compute='_compute_week', help='Incoming from plan reservation')
# Stored related copy of the field above.
incoming_pending_amount_plan_reserv_required_run = fields.Float(
    'Incoming from plan reserv required run',
    related='incoming_pending_amount_plan_reservation',
    digits=dp.get_precision('Product Unit of Measure'), store=True)
# Computed m2m: procurements behind the reservation amount; only the
# relation table name differs from the non-reservation field.
incoming_pending_procurements_plan_reservation = fields.Many2many(
    comodel_name='procurement.order',
    string='Incoming pending procurements from plan reservation',
    relation='rel_stock_info_incoming_pending_procurement_plan_reserv',
    column1='stock_info_id', column2='pending_procurement_plan_id',
    compute='_compute_week')
# Computed: outgoing quantity coming from pending stock moves only.
outgoing_pending_amount_moves = fields.Float(
    'Outgoing pending amount from moves', compute='_compute_week',
    digits=dp.get_precision('Product Unit of Measure'),
    help='Gross requirement')
# Computed: outgoing quantity from reserved procurements.
outgoing_pending_amount_reserv = fields.Float(
    'Outgoing pending amount reservation', compute='_compute_week',
    digits=dp.get_precision('Product Unit of Measure'),
    help='Gross requirement reservation')
# Computed m2m: reserved procurements behind the outgoing amount.
outgoing_pending_procurement_reserv = fields.Many2many(
    comodel_name='procurement.order',
    string='Outgoing pending procurements reservation',
    relation='rel_stock_info_outgoing_pending_procurement_reserv',
    column1='stock_info_id', column2='pending_procurement_reserv_id',
    compute='_compute_week')
@api.multi
def show_outgoing_pending_reserved_moves(self):
    """Open a tree/form window listing this line's reserved outgoing procurements."""
    self.ensure_one()
    action = {
        'name': _('Outgoing pending reserved procurements'),
        'view_type': 'form',
        'view_mode': 'tree,form',
        'res_model': 'procurement.order',
        'type': 'ir.actions.act_window',
        'domain': [('id', 'in', self.outgoing_pending_procurement_reserv.ids)],
    }
    return action
@api.multi
def show_incoming_procurements_from_plan(self):
    """Open a tree/form window listing this line's incoming planned procurements."""
    self.ensure_one()
    action = {
        'name': _('Incoming procurements from plan'),
        'view_type': 'form',
        'view_mode': 'tree,form',
        'res_model': 'procurement.order',
        'type': 'ir.actions.act_window',
        'domain': [('id', 'in', self.incoming_pending_procurements_plan.ids)],
    }
    return action
@api.multi
def show_incoming_procurements_from_plan_reservation(self):
self.ensure_one()
ids = self.incoming_pending_procurements_plan_reservation.ids
return {'name': _('Incoming procurements from plan reserv |
"""Django admin registration for the Job model."""
from django.contrib import admin

from .models import Job

# Register your models here.
admin.site.register(Job)
|
from django.contrib import admin
from django.con | trib.sites.models import Site
from . import models
class RedirectAdmin(admin.ModelAdmin):
    """Admin for Redirect entries, exposing the resolved absolute URL."""

    list_display = ['slug', 'url', 'counter', 'created_at', 'full_url']
    readonly_fields = ['counter', 'created_at', 'full_url']
    fields = ['slug', 'url', 'counter', 'created_at', 'full_url']

    def full_url(self, obj):
        """Return the shareable https URL for `obj`, or None when slug is unset."""
        if not obj.slug:
            return None
        current_site = Site.objects.get_current()
        path = obj.get_absolute_url()
        return f'https://{current_site.domain}{path}'


admin.site.register(models.Redirect, RedirectAdmin)
|
import numpy as np
import cv2
import matplotlib.pyplot as plt

# Load the image in grayscale (flag 0 = cv2.IMREAD_GRAYSCALE).
img = cv2.imread('images/dolphin.png', 0)

# Display the image until any key is pressed.
cv2.imshow("Dolphin Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# NOTE: numpy indexing is 0-based, so [49, 99] is row 50 / column 100.
print('The intensity value at row 50 & column 100 is : {}'.format(img[49, 99]))
print('Row 50 column values:')
print(img[49, :])
print('Rows 101-103 & columns 201-203')
print(img[100:103, 200:203])

# Plot the intensity profile along row 50.
plt.plot(img[49, :])
plt.show()
| |
def LetterCount(str):
words = str.split(" ")
result | _word = ""
letter_count = 0
for word in words:
word_map = {}
for ch in word:
if ch in word_map:
word_map[ch] += 1
else:
word_map[ch] = 1
max_key = max(word_map.iterkeys(), key=lambda k: word_map[k])
| if letter_count < word_map[max_key] and word_map[max_key] > 1:
letter_count = word_map[max_key]
result_word = word
return result_word if letter_count > 1 else -1
# Parenthesized print works on both Python 2 and Python 3; the original
# bare `print` statement is a syntax error on Python 3.
print(LetterCount("Hello apple pie"))
print(LetterCount("No words"))
|
download_dst = git.get_repo(uri=uri, branch=branch, commit=ref,
destination_dir=download_dst)
os.chdir(download_dst)
try:
process.run('git remote add origin %s' % uri)
except process.CmdError:
pass
process.run('git pull origin %s' % branch)
except:
if not dir_existed and os.path.isdir(download_dst):
logging.error('Cleaning up provider %s download dir %s', provider,
download_dst)
shutil.rmtree(download_dst)
raise
# sanity check to ensure the git repository is OK:
try:
os.chdir(download_dst)
process.system('git log -1')
except process.CmdError:
logging.error('Something is unexpectedly wrong with the git repository at %s',
download_dst)
raise
finally:
os.chdir(original_dir)
def download_all_test_providers(update=False):
    """
    Download all available test providers.
    """
    provider_names = get_test_provider_names()
    for name in provider_names:
        download_test_provider(name, update)
def get_all_assets():
    """Return asset info dicts for every *.ini file in the download dir."""
    download_dir = data_dir.get_download_dir()
    ini_paths = glob.glob(os.path.join(download_dir, '*.ini'))
    # Strip the directory and the '.ini' suffix to get each asset name.
    return [get_asset_info(os.path.basename(path)[:-4]) for path in ini_paths]
def get_file_asset(title, src_path, destination):
    """Build an asset info dict for a local file, compressed or not.

    Checks for a compressed variant (src_path + known extension) first,
    then the plain file; returns None when neither exists.
    """
    if not os.path.isabs(destination):
        destination = os.path.join(data_dir.get_data_dir(), destination)
    for ext in (".xz", ".gz", ".7z", ".bz2"):
        compressed_src = src_path + ext
        if os.path.exists(compressed_src):
            # The compressed copy keeps the extension on its destination.
            destination = destination + ext
            logging.debug('Found source image %s', destination)
            return {'url': None,
                    'sha1_url': None,
                    'destination': compressed_src,
                    'destination_uncompressed': destination,
                    'uncompress_cmd': None,
                    'shortname': title,
                    'title': title,
                    'downloaded': True}
    if os.path.exists(src_path):
        logging.debug('Found source image %s', destination)
        return {'url': src_path,
                'sha1_url': None,
                'destination': destination,
                'destination_uncompressed': None,
                'uncompress_cmd': None,
                'shortname': title,
                'title': title,
                'downloaded': os.path.exists(destination)}
    return None
def get_asset_info(asset):
    """Parse <download_dir>/<asset>.ini and return its asset description dict."""
    asset_path = os.path.join(data_dir.get_download_dir(), '%s.ini' % asset)
    assert os.path.exists(asset_path)
    asset_cfg = ConfigLoader(asset_path)

    destination = asset_cfg.get(asset, 'destination')
    if not os.path.isabs(destination):
        destination = os.path.join(data_dir.get_data_dir(), destination)

    asset_info = {
        'url': asset_cfg.get(asset, 'url'),
        'sha1_url': asset_cfg.get(asset, 'sha1_url'),
        'title': asset_cfg.get(asset, 'title'),
        'destination': destination,
        'asset_exists': os.path.isfile(destination),
    }

    # Optional fields
    d_uncompressed = asset_cfg.get(asset, 'destination_uncompressed')
    if d_uncompressed is not None and not os.path.isabs(d_uncompressed):
        d_uncompressed = os.path.join(data_dir.get_data_dir(),
                                      d_uncompressed)
    asset_info['destination_uncompressed'] = d_uncompressed
    asset_info['uncompress_cmd'] = asset_cfg.get(asset, 'uncompress_cmd')
    return asset_info
def uncompress_asset(asset_info, force=False):
    """Uncompress a downloaded asset into its destination_uncompressed path.

    :param asset_info: dictionary returned by get_asset_info.
    :param force: uncompress even if the uncompressed target already exists
        (it is always forced when the target is missing).
    """
    destination = asset_info['destination']
    uncompress_cmd = asset_info['uncompress_cmd']
    destination_uncompressed = asset_info['destination_uncompressed']
    # Recognised compressed-file extensions.
    archive_re = re.compile(r".*\.(gz|xz|7z|bz2)$")
    if destination_uncompressed is not None:
        if uncompress_cmd is None:
            # No explicit command configured: derive one from the extension.
            match = archive_re.match(destination)
            if match:
                if match.group(1) == 'gz':
                    uncompress_cmd = ('gzip -cd %s > %s' %
                                      (destination, destination_uncompressed))
                elif match.group(1) == 'xz':
                    uncompress_cmd = ('xz -cd %s > %s' %
                                      (destination, destination_uncompressed))
                elif match.group(1) == 'bz2':
                    uncompress_cmd = ('bzip2 -cd %s > %s' %
                                      (destination, destination_uncompressed))
                elif match.group(1) == '7z':
                    # 7za extracts into the current directory.
                    uncompress_cmd = '7za -y e %s' % destination
        else:
            # An explicit command was configured; append the archive path.
            uncompress_cmd = "%s %s" % (uncompress_cmd, destination)
        if uncompress_cmd is not None:
            uncompressed_file_exists = os.path.exists(destination_uncompressed)
            force = (force or not uncompressed_file_exists)
            if os.path.isfile(destination) and force:
                # NOTE(review): changes the process-wide cwd and never
                # restores it.
                os.chdir(os.path.dirname(destination_uncompressed))
                logging.debug('Uncompressing %s -> %s', destination,
                              destination_uncompressed)
                commands.getstatusoutput(uncompress_cmd)
                backup_file = destination_uncompressed + '.backup'
                if os.path.isfile(backup_file):
                    # Refresh an existing backup with the newly
                    # uncompressed file.
                    logging.debug('Copying %s -> %s', destination_uncompressed,
                                  backup_file)
                    shutil.copy(destination_uncompressed, backup_file)
def download_file(asset_info, interactive=False, force=False):
"""
Verifies if file that can be find on url is on destination with right hash.
This function will verify the SHA1 hash of the file. If the file
appears to be missing or corrupted, let the user know.
:param asset_info: Dictionary returned by get_asset_info
"""
file_ok = False
problems_ignored = False
had_to_download = False
sha1 = None
url = asset_info['url']
sha1_url = asset_info['sha1_url']
destination = asset_info['destination']
title = asset_info['title']
if sha1_url is not None:
try:
logging.info("Verifying expected SHA1 sum from %s", sha1_url)
sha1_file = urllib2.urlopen(sha1_url)
sha1_contents = sha1_file.read()
sha1 = sha1_contents.split(" ")[0]
logging.info("Expected SHA1 sum: %s", sha1)
except Exception, e:
logging.error("Failed to get SHA1 from file: %s", e)
else:
sha1 = None
destination_dir = os.path.dirname(destination)
if not os.path.isdir(destination_dir):
os.makedirs(destination_dir)
if not os.path.isfile(destination):
logging.warning("File %s not found", destination)
if interactive:
answer = genio.ask("Would you like to download it from %s?" % url)
else:
answer = 'y'
if answer == 'y':
download.url_download_interactive(url, destination,
"Downloading %s" % title)
had_to_download = True
else:
logging.warning("Missing file %s", destination)
else:
logging.info("Found %s", destination)
if sha1 is None:
answer = 'n'
else:
answer = 'y'
if answer == 'y':
actual_sha1 = crypto.hash_file(destination, algorithm='sha1')
if actual_sha1 != sha1:
logging.info("Actual SHA1 sum: %s", actual_sha1)
if interactive:
answer = genio.ask("The file seems corrupted or outdated. "
"Would you like to download it?")
else:
logging.info("The file seems corrupted or outdated")
answer = 'y'
if answer == 'y':
logging.info("Updating image to the latest available...")
while not file_ok:
download.url_download_interactive(url, destination,
title)
|
# -*- coding: utf-8 -*-
from time import time
from threading import Lock
from functools import wraps
STATE_CLOSED = "closed"
STATE_OPEN = "open"
STATE_HALF_OPEN = "half-open"
def get_now():
    """Return the current Unix time as an integer number of seconds."""
    timestamp = time()
    return int(timestamp)
class CircuitBreakerError(Exception):
    """Base class for all errors raised by the circuit breaker."""
    pass
class TooManyRequestsError(CircuitBreakerError):
    """Raised when the half-open state has no trial-request slots left."""
    pass
class OpenStateError(CircuitBreakerError):
    """Raised when a request is rejected because the breaker is open."""
    pass
class Count(object):
    """Mutable request/success/failure counters for one breaker generation."""

    __slots__ = ("requests", "total_successes", "total_failures",
                 "consecutive_successes", "consecutive_failures")

    def __init__(self):
        self.requests = 0
        self.total_successes = 0
        self.total_failures = 0
        self.consecutive_successes = 0
        self.consecutive_failures = 0

    def on_request(self):
        """Record that a request was admitted."""
        self.requests += 1

    def on_success(self):
        """Record a success; resets the consecutive-failure streak."""
        self.total_successes += 1
        self.consecutive_successes += 1
        self.consecutive_failures = 0

    def on_failure(self):
        """Record a failure; resets the consecutive-success streak."""
        self.total_failures += 1
        self.consecutive_failures += 1
        self.consecutive_successes = 0

    def clear(self):
        """Reset every counter to zero."""
        self.requests = 0
        self.total_successes = 0
        self.total_failures = 0
        self.consecutive_successes = 0
        self.consecutive_failures = 0

    def copy(self):
        """Return an independent snapshot of these counters.

        BUG FIX: the original called ``self.__class__.__new__()`` without
        the required class argument (a TypeError) and then copied four of
        the five fields from the new object onto itself instead of
        reading them from ``self``.
        """
        cls = self.__class__
        c = cls.__new__(cls)
        c.requests = self.requests
        c.total_successes = self.total_successes
        c.total_failures = self.total_failures
        c.consecutive_successes = self.consecutive_successes
        c.consecutive_failures = self.consecutive_failures
        return c
class CircuitBreaker(object):
    """Circuit breaker guarding calls to an unreliable dependency.

    State machine (module-level constants):
      - STATE_CLOSED: requests pass through; failures are counted.
      - STATE_OPEN: requests are rejected (OpenStateError) until the
        recovery timeout elapses.
      - STATE_HALF_OPEN: at most max_requests trial requests are allowed
        (excess raises TooManyRequestsError).
    """

    # Class-level defaults; each may be overridden per instance.
    MAX_REQUESTS = 1
    COUNT_INTERVAL = 0
    RECOVERY_TIMEOUT = 60
    FAILURE_THRESHOLD = 5
    EXPECTED_EXCEPTION = Exception

    def __init__(self, name=None, max_requests=None, count_interval=None,
                 recovery_timeout=None, failure_threshold=None,
                 expected_exception=None, on_state_change=None):
        """The Circuit Breaker.

        :param name: breaker name; defaults to the decorated callable's
            __name__ when used as a decorator.
        :param max_requests: trial requests allowed while half-open.
        :param count_interval: seconds per counting generation while
            closed; falsy means counters are never cycled by time.
        :param recovery_timeout: seconds to stay open before half-open.
        :param failure_threshold: consecutive-failure level used to trip
            the breaker (see _on_failure for the exact comparison).
        :param expected_exception: exception class treated as a failure.
        :param on_state_change: optional callback
            (name, previous_state, new_state) fired on transitions.
        """
        self._name = name
        # Falsy arguments (None, 0) fall back to the class defaults.
        self._max_requests = max_requests or self.MAX_REQUESTS
        self._count_interval = count_interval or self.COUNT_INTERVAL
        self._recovery_timeout = recovery_timeout or self.RECOVERY_TIMEOUT
        self._failure_threshold = failure_threshold or self.FAILURE_THRESHOLD
        self._expected_exception = expected_exception or self.EXPECTED_EXCEPTION
        self._on_state_change = on_state_change
        self._state = STATE_CLOSED
        self._generation = 0
        self._count = Count()
        self._expiry = 0
        self._lock = Lock()
        self._new_generation(get_now())

    @property
    def name(self):
        """Return the name of Circuit Breaker."""
        return self._name

    @property
    def state(self):
        """Return the state of Circuit Breaker."""
        with self._lock:
            # _current_state may lazily transition (e.g. open -> half-open).
            return self._current_state(get_now())[0]

    @property
    def is_open(self):
        """Return True if the Circuit Breaker is open. Or False."""
        return self.state == STATE_OPEN

    @property
    def is_closed(self):
        """Return True if the Circuit Breaker is closed. Or False."""
        return self.state == STATE_CLOSED

    @property
    def is_half_open(self):
        """Return True if the Circuit Breaker is half-open. Or False."""
        return self.state == STATE_HALF_OPEN

    @property
    def count(self):
        """Return the count information of the requests (a snapshot)."""
        with self._lock:
            # Delegates to Count.copy() so callers get an independent copy.
            return self._count.copy()

    def __call__(self, wrapped):
        """Decorate the function or method.

        Notice: when decorating more than one function or method, you should
        assign a unique name to the circuit breaker.
        """
        if not self._name:
            # Default the breaker name to the wrapped callable's name.
            self._name = wrapped.__name__

        @wraps(wrapped)
        def wrapper(*args, **kwargs):
            return self.call(wrapped, *args, **kwargs)

        CircuitBreakerMonitor.register(self)
        return wrapper

    def allow(self):
        """Checks if a new request can proceed.

        It returns a callback that should be used to register the success
        or failure in a separate step.
        If the circuit breaker doesn't allow requests, it raises an exception.
        """
        generation = self._before_request()
        return lambda ok: self._after_request(generation, ok)

    def call(self, func, *args, **kwargs):
        """Run the given request if the CircuitBreaker accepts it.

        It raises an error if the CircuitBreaker rejects the request.
        Otherwise, it will return the result of the request.
        If an exception is raised in the request, the CircuitBreaker handles it
        as a failure and reraises it again.
        """
        generation = self._before_request()
        try:
            result = func(*args, **kwargs)
        except self._expected_exception:
            self._after_request(generation, False)
            raise
        else:
            self._after_request(generation, True)
            return result

    def _before_request(self):
        # Admission control: raises when rejected, otherwise counts the
        # request and returns the generation it belongs to.
        with self._lock:
            now = get_now()
            state, generation = self._current_state(now)
            if state == STATE_OPEN:
                raise OpenStateError
            elif state == STATE_HALF_OPEN and self._count.requests >= self._max_requests:
                raise TooManyRequestsError
            self._count.on_request()
            return generation

    def _after_request(self, before_generation, ok):
        # Record the outcome, ignoring results from an older generation.
        with self._lock:
            now = get_now()
            state, generation = self._current_state(now)
            if generation != before_generation:
                return
            (self._on_success if ok else self._on_failure)(state, now)

    def _on_success(self, state, now):
        # Must be called with the lock held.
        if state == STATE_CLOSED:
            self._count.on_success()
        elif state == STATE_HALF_OPEN:
            self._count.on_success()
            # Enough consecutive trial successes close the breaker.
            if self._count.consecutive_successes >= self._max_requests:
                self._set_statue(STATE_CLOSED, now)

    def _on_failure(self, state, now):
        # Must be called with the lock held.
        if state == STATE_CLOSED:
            self._count.on_failure()
            # NOTE(review): strict '>' means the breaker trips on the
            # (failure_threshold + 1)-th consecutive failure — confirm
            # this is intended.
            if self._count.consecutive_failures > self._failure_threshold:
                self._set_statue(STATE_OPEN, now)
        elif state == STATE_HALF_OPEN:
            # Any failure while half-open re-opens the breaker.
            self._set_statue(STATE_OPEN, now)

    def _current_state(self, now):
        # Resolve time-based transitions lazily; lock must be held.
        state = self._state
        if state == STATE_CLOSED:
            # Cycle the counting generation once the interval expired.
            if self._expiry and self._expiry < now:
                self._new_generation(now)
        elif state == STATE_OPEN:
            # Recovery timeout elapsed: start admitting trial requests.
            if self._expiry < now:
                self._set_statue(STATE_HALF_OPEN, now)
        return self._state, self._generation

    def _set_statue(self, state, now):
        # (sic: "statue") Transition to `state`, begin a new generation and
        # fire the optional state-change callback.
        if self._state == state:
            return
        prev, self._state = self._state, state
        self._new_generation(now)
        if self._on_state_change:
            self._on_state_change(self._name, prev, state)

    def _new_generation(self, now):
        # Reset counters and schedule the next time-based transition.
        self._generation += 1
        self._count.clear()
        state = self._state
        if state == STATE_CLOSED:
            self._expiry = (now + self._count_interval) if self._count_interval else 0
        elif state == STATE_OPEN:
            self._expiry = now + self._recovery_timeout
        else:  # STATE_HALF_OPEN
            self._expiry = 0
class CircuitBreakerMonitor(object):
circuit_breakers = {}
@classmethod
def register(cls, cb):
    """Register a circuit breaker.

    NOTE(review): breakers are keyed by cb.name, so registering another
    breaker with the same name replaces the existing entry.
    """
    cls.circuit_breakers[cb.name] = cb
@classmethod
def all_closed(cls):
    """Return True if all circuit breakers are closed."""
    # Defined as "no breaker reports itself open" via get_all_open().
    return not cls.get_all_open()
@classmethod
def get_all_circuit_breakers(cls):
    """Return all circuit breakers.

    Returns the registry's values view (not a list) on Python 3.
    """
    return cls.circuit_breakers.values()
@classmethod
def get(cls, name):
    """Return the circuit breaker named 'name', or None when unknown."""
    return cls.circuit_breakers.get(name, None)
@classmethod
def get_all_open(cls):
"""Return all open circuit breakers."""
return [cb for cb in cls.circuit_br |
"""URLs."""
from django.conf.urls import include, url
from django.contrib import admin
import apps.status.views
admin.autodiscover()
# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
urlpatterns = [
url(r'^$', apps.status.views.index, name='index'),
url(r'^trackings/(?P<carrier_slug>[\ | w-]+)/(?P<tracking_number>[\w-]+)/$',
apps.status.views.trackings),
url(r'^admin/', inc | lude(admin.site.urls)),
]
|
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2020 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test | case for Common
"""
from tests import common
import bleachbit
import os
class CommonTestCase(common.BleachbitTestCase):
    """Test case for Common."""

    def test_expandvars(self):
        """Unit test for expandvars."""
        var = os.path.expandvars('$HOME')
        self.assertIsString(var)

    def test_environment(self):
        """Test for important environment variables"""
        # useful for researching
        # grep -Poh "([\$%]\w+)" cleaners/*xml | cut -b2- | sort | uniq -i
        envs = {'posix': ['XDG_DATA_HOME', 'XDG_CONFIG_HOME', 'XDG_CACHE_HOME', 'HOME'],
                'nt': ['AppData', 'CommonAppData', 'Documents', 'ProgramFiles', 'UserProfile', 'WinDir']}
        for env in envs[os.name]:
            e = os.getenv(env)
            self.assertIsNotNone(e)
            # A real path value should be longer than 4 characters.
            self.assertGreater(len(e), 4)

    def test_expanduser(self):
        """Unit test for expanduser."""
        # Return Unicode when given Unicode.
        self.assertIsString(os.path.expanduser('~'))
        # Blank input should give blank output.
        self.assertEqual(os.path.expanduser(''), '')
        # An absolute path should not be altered.
        abs_dirs = {'posix': '$HOME', 'nt': '%USERPROFILE%'}
        abs_dir = os.path.expandvars(abs_dirs[os.name])
        self.assertExists(abs_dir)
        self.assertEqual(os.path.expanduser(abs_dir), abs_dir)
        # A relative path (without a reference to the home directory)
        # should not be expanded.
        self.assertEqual(os.path.expanduser('common'), 'common')
|
from helper import unittest, PillowTestCase
from PIL import Image, CurImagePlugin
TEST_FILE = "Tests/images/deerstalker.cur"
class TestFileCur(PillowTestCase):
    """Tests for the Windows cursor (.cur) image plugin."""

    def test_sanity(self):
        """A valid .cur file opens with the expected size, type and pixels."""
        im = Image.open(TEST_FILE)
        self.assertEqual(im.size, (32, 32))
        self.assertIsInstance(im, CurImagePlugin.CurImageFile)
        # Check some pixel colors to ensure image is loaded properly
        self.assertEqual(im.getpixel((10, 1)), (0, 0, 0, 0))
        self.assertEqual(im.getpixel((11, 1)), (253, 254, 254, 1))
        self.assertEqual(im.getpixel((16, 16)), (84, 87, 86, 255))

    def test_invalid_file(self):
        """Non-cursor input and cursor-less headers must raise."""
        invalid_file = "Tests/images/flower.jpg"

        self.assertRaises(SyntaxError,
                          lambda: CurImagePlugin.CurImageFile(invalid_file))

        # Re-parse a valid header against a file with no cursor entries.
        no_cursors_file = "Tests/images/no_cursors.cur"

        cur = CurImagePlugin.CurImageFile(TEST_FILE)
        with open(no_cursors_file, "rb") as cur.fp:
            self.assertRaises(TypeError, cur._open)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
ld.expect('is_authenticated\(\)\r\n')
child.expect('get_username\(\)\r\n')
child.expect('echo_ints\(ints\)\r\n')
child.sendline('exec echo "Hello Server"')
child.expect('Server replied: "Hello Server"\r\n')
child.sendline('exec add 5 6')
child.expect('Server replied: 11\r\n')
child.sendline('exec is_authenticated')
child.expect('Server replied: false\r\n')
child.sendline('exec get_username')
child.expect('Server replied: null\r\n')
finally:
child.close(True)
server.stop()
    def test_rpcsh_expect_http(self):
        """Drive an interactive rpcsh session against the HTTP example server.

        Checks the banner, the service self-description, 'list' output and a
        few 'exec' round trips over the HTTP transport.
        """
        try:
            server = ServerRunner('../examples/serverhttp.py', 5500)
            server.run()
            python = sys.executable
            child = pexpect.spawn('%s ../rpcsh localhost 5500 --http' % (python))
            child.expect('ReflectRPC Shell\r\n')
            child.expect('================\r\n\r\n')
            child.expect("Type 'help' for available commands\r\n\r\n")
            child.expect('RPC server: localhost:5500\r\n\r\n')
            child.expect('Self-description of the Service:\r\n')
            child.expect('================================\r\n')
            child.expect('Example RPC Service \(1.0\)\r\n')
            child.expect('This is an example service for ReflectRPC\r\n')
            child.expect('\(rpc\) ')
            child.sendline('list')
            child.expect('echo\(message\)\r\n')
            child.expect('add\(a, b\)\r\n')
            child.expect('sub\(a, b\)\r\n')
            child.expect('mul\(a, b\)\r\n')
            child.expect('div\(a, b\)\r\n')
            child.expect('enum_echo\(phone_type\)\r\n')
            child.expect('hash_echo\(address\)\r\n')
            child.expect('notify\(value\)\r\n')
            child.expect('is_authenticated\(\)\r\n')
            child.expect('get_username\(\)\r\n')
            child.expect('echo_ints\(ints\)\r\n')
            child.sendline('exec echo "Hello Server"')
            child.expect('Server replied: "Hello Server"\r\n')
            child.sendline('exec add 5 6')
            child.expect('Server replied: 11\r\n')
            child.sendline('exec is_authenticated')
            child.expect('Server replied: false\r\n')
            child.sendline('exec get_username')
            child.expect('Server replied: null\r\n')
        finally:
            # NOTE(review): 'child'/'server' are unbound here if construction
            # above raised before assignment — confirm and guard if needed.
            child.close(True)
            server.stop()
    def test_rpcsh_expect_http_basic_auth(self):
        """rpcsh over HTTP with valid Basic Auth reports an authenticated user."""
        try:
            server = ServerRunner('../examples/serverhttp_basic_auth.py', 5500)
            server.run()
            python = sys.executable
            child = pexpect.spawn('%s ../rpcsh localhost 5500 --http --http-basic-user testuser' % (python))
            child.expect('Password: ')
            child.sendline('123456')
            child.expect('ReflectRPC Shell\r\n')
            child.expect('================\r\n\r\n')
            child.expect("Type 'help' for available commands\r\n\r\n")
            child.expect('RPC server: localhost:5500\r\n\r\n')
            child.expect('Self-description of the Service:\r\n')
            child.expect('================================\r\n')
            child.expect('Example RPC Service \(1.0\)\r\n')
            child.expect('This is an example service for ReflectRPC\r\n')
            child.expect('\(rpc\) ')
            child.sendline('list')
            child.expect('echo\(message\)\r\n')
            child.expect('add\(a, b\)\r\n')
            child.expect('sub\(a, b\)\r\n')
            child.expect('mul\(a, b\)\r\n')
            child.expect('div\(a, b\)\r\n')
            child.expect('enum_echo\(phone_type\)\r\n')
            child.expect('hash_echo\(address\)\r\n')
            child.expect('notify\(value\)\r\n')
            child.expect('is_authenticated\(\)\r\n')
            child.expect('get_username\(\)\r\n')
            child.expect('echo_ints\(ints\)\r\n')
            child.sendline('exec echo "Hello Server"')
            child.expect('Server replied: "Hello Server"\r\n')
            child.sendline('exec add 5 6')
            child.expect('Server replied: 11\r\n')
            # Unlike the unauthenticated tests, the server now knows the user.
            child.sendline('exec is_authenticated')
            child.expect('Server replied: true\r\n')
            child.sendline('exec get_username')
            child.expect('Server replied: "testuser"\r\n')
        finally:
            # NOTE(review): 'child'/'server' are unbound here if construction
            # above raised before assignment — confirm and guard if needed.
            child.close(True)
            server.stop()
def test_rpcsh_expect_http_basic_auth_fail(self):
try:
server = ServerRunner('../examples/serverhttp_basic_auth.py', 5500)
server.run()
python = sys.executable
child = pexpect.spawn('%s ../rpcsh localhost 5500 --http --http-basic-user testuser' % (python))
child.expect('Password: ')
child.send('wrongpassword\r\n')
child.expect('Authentication failed\r\n\r\n')
child.expect('Failed to connect to localhost on TCP port 5500\r\n')
finally:
child.close(True)
server.stop()
    def test_rpcsh_expect_tls(self):
        """Drive an interactive rpcsh session against the TLS example server."""
        try:
            server = ServerRunner('../examples/servertls.py', 5500)
            server.run()
            python = sys.executable
            child = pexpect.spawn('%s ../rpcsh localhost 5500 --tls' % (python))
            child.expect('ReflectRPC Shell\r\n')
            child.expect('================\r\n\r\n')
            child.expect("Type 'help' for available commands\r\n\r\n")
            child.expect('RPC server: localhost:5500\r\n\r\n')
            child.expect('Self-description of the Service:\r\n')
            child.expect('================================\r\n')
            child.expect('Example RPC Service \(1.0\)\r\n')
            child.expect('This is an example service for ReflectRPC\r\n')
            child.expect('\(rpc\) ')
            child.sendline('list')
            child.expect('echo\(message\)\r\n')
            child.expect('add\(a, b\)\r\n')
            child.expect('sub\(a, b\)\r\n')
            child.expect('mul\(a, b\)\r\n')
            child.expect('div\(a, b\)\r\n')
            child.expect('enum_echo\(phone_type\)\r\n')
            child.expect('hash_echo\(address\)\r\n')
            child.expect('notify\(value\)\r\n')
            child.expect('is_authenticated\(\)\r\n')
            child.expect('get_username\(\)\r\n')
            child.expect('echo_ints\(ints\)\r\n')
            child.sendline('exec echo "Hello Server"')
            child.expect('Server replied: "Hello Server"\r\n')
            child.sendline('exec add 5 6')
            child.expect('Server replied: 11\r\n')
            child.sendline('exec is_authenticated')
            child.expect('Server replied: false\r\n')
            child.sendline('exec get_username')
            child.expect('Server replied: null\r\n')
        finally:
            # NOTE(review): 'child'/'server' are unbound here if construction
            # above raised before assignment — confirm and guard if needed.
            child.close(True)
            server.stop()
def test_rpcsh_expect_tls_client_auth(self):
try:
server = ServerRunner('../examples/servertls_clientauth.py', 5500)
server.run()
python = sys.executable
child = pexpect.spawn('%s ../rpcsh localhost | 5500 --tls --ca .. | /examples/certs/rootCA.crt --key ../examples/certs/client.key --cert ../examples/certs/client.crt' % (python))
child.expect('ReflectRPC Shell\r\n')
child.expect('================\r\n\r\n')
child.expect("Type 'help' for available commands\r\n\r\n")
child.expect('RPC server: localhost:5500\r\n\r\n')
child.expect('Self-description of the Service:\r\n')
child.expect('================================\r\n')
child.expect('Example RPC Service \(1.0\)\r\n')
child.expect('This is an example service for ReflectRPC\r\n')
child.expect('\(rpc\) ')
child.sendline('list')
child.expect('echo\(message\)\r\n')
child.expect('add\(a, b\)\r\n')
child.expect('sub\(a, b\)\r\n')
child.expect('mul\(a, b\)\r\n')
child.expect('div\(a, b\)\r\n')
ch |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes shoul | d have the logging.logWrit | er
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
  """Warm the IAM project-policy cache for every project owning a cluster."""
  for project_id in {c.project_id for c in gke.get_clusters(context).values()}:
    iam.get_project_policy(project_id)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
  """Check that each nodepool's service account can write logs.

  For every cluster with logging enabled, each nodepool's service account
  must be enabled and hold ROLE in the project's IAM policy.
  """
  # Find all clusters with logging enabled.
  clusters = gke.get_clusters(context)
  # NOTE(review): policy is fetched only for context.project_id, while
  # prefetch_rule caches a policy per cluster project — confirm clusters in
  # other projects are not expected here.
  iam_policy = iam.get_project_policy(context.project_id)
  if not clusters:
    report.add_skipped(None, 'no clusters found')
  for _, c in sorted(clusters.items()):
    if not c.has_logging_enabled():
      report.add_skipped(c, 'logging disabled')
    else:
      # Verify service-account permissions for every nodepool.
      for np in c.nodepools:
        sa = np.service_account
        if not iam.is_service_account_enabled(sa, context.project_id):
          report.add_failed(np, f'service account disabled or deleted: {sa}')
        elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
          report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
        else:
          report.add_ok(np)
|
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
from ripcord.openstack.common import gettextutils
from ripcord.openstack.common import importutils
gettextutils.install('ripcord')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(srcfiles):
    """Generate a sample config file from the options found in *srcfiles*.

    Groups source modules by top-level package, imports each module,
    collects its options per option group and prints them in INI format,
    DEFAULT group first.  NOTE: written for Python 2 — ``filter`` must
    return a list for the in-place ``sort`` calls below to work.
    """
    mods_by_pkg = dict()
    for filepath in srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
    pkg_names.sort()
    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
    ext_names.sort()
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    # Extra modules may be injected through the environment.
    for module_name in os.getenv(
            "OSLO_CONFIG_GENERATOR_EXTRA_MODULES", "").split(','):
        module = _import_module(module_name)
        if module:
            for group, opts in _list_opts(module):
                opts_by_group.setdefault(group, []).append((module_name, opts))

    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            # Packages are imported by their package name, not __init__.
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
            mod_obj = _import_module(mod_str)
            if not mod_obj:
                continue
            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group, opts in opts_by_group.items():
        print_group_opts(group, opts)
def _import_module(mod_str):
    """Import *mod_str* and return the module object, or None on failure.

    Names prefixed with 'bin.' are loaded as source files from the bin/
    directory.  ImportErrors are reported to stderr; any other exception is
    silently swallowed (best-effort scan over many candidate modules).
    """
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            return importutils.import_module(mod_str)
    except ImportError as ie:
        sys.stderr.write("%s\n" % str(ie))
        return None
    except Exception:
        return None
def _is_in_group(opt, group):
"Check if opt is in group."
for key, value in group._opts.items():
if value['opt'] == opt:
return True
return False
def _guess_groups(opt, mod_obj):
    """Return the name of the option group *opt* is registered in.

    Raises RuntimeError when the option cannot be located in cfg.CONF.
    """
    # is it in the DEFAULT group?
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # what other groups is it in?
    for key, value in cfg.CONF.items():
        if isinstance(value, cfg.CONF.GroupAttr):
            if _is_in_group(opt, value._group):
                return value._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )
def _list_opts(obj):
    """Return (group_name, [opts]) pairs for all cfg.Opt attributes of *obj*.

    Collects both bare Opt attributes and lists consisting entirely of
    Opts; SubCommandOpt instances are excluded.
    """
    def is_opt(o):
        # SubCommandOpts carry no value and cannot appear in a config file.
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    opts = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(map(lambda x: is_opt(x), attr_obj))):
            opts.extend(attr_obj)

    ret = {}
    for opt in opts:
        ret.setdefault(_guess_groups(opt, obj), []).append(opt)
    return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_ | module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
    """Best-effort lookup of this host's outbound IPv4 address, or None.

    Connecting a UDP socket to a public address selects the local address
    the OS would route through, without sending any packets.
    """
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(('8.8.8.8', 80))
        addr = probe.getsockname()[0]
        probe.close()
        return addr
    except socket.error:
        return None
def _sanitize_default(name, value):
    """Set up a reasonably sensible default for pybasedir, my_ip and host.

    Rewrites environment-specific defaults (install prefix, source-tree
    path, local IP, hostname) into portable placeholder values, and quotes
    values that carry surrounding whitespace.
    """
    if value.startswith(sys.prefix):
        # NOTE(jd) Don't use os.path.join, because it is likely to think the
        # second part is an absolute pathname and therefore drop the first
        # part.
        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
    elif value.startswith(BASEDIR):
        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in value:
        return value.replace(BASEDIR, '')
    elif value == _get_my_ip():
        return '10.0.0.1'
    elif value == socket.gethostname() and 'host' in name:
        return 'ripcord'
    elif value.strip() != value:
        # Preserve significant leading/trailing whitespace by quoting.
        return '"%s"' % value
    return value
def _print_opt(opt):
    """Print one option as commented sample-config lines (help, then value).

    Exits the process when the option type cannot be recognized or the
    declared default does not match the option's type.
    """
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    opt_type = None
    try:
        # Recover the type name (e.g. "StrOpt") from the repr of the class.
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    try:
        if opt_default is None:
            print('#%s=<None>' % opt_name)
        elif opt_type == STROPT:
            assert(isinstance(opt_default, basestring))
            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
                                                          opt_default)))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, str(opt_default).lower()))
        elif opt_type == INTOPT:
            # bool is a subclass of int, so exclude it explicitly.
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print('#%s=%s' % (opt_name, ','.join(opt_default)))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print('#%s=%s' % (opt_name, default))
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
def main():
generate(s |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run specific test on specific environment."""
import logging
import os
import tempfile
from pylib import constants
from pylib.base import base_test_result
from pylib.remote.device import remote_device_test_run
from pylib.remote.device import remote_device_helper
_EXTRA_COMMAND_LINE_FILE = (
'org.chromium.native_test.NativeTestActivity.CommandLineFile')
class RemoteDeviceGtestTestRun(remote_device_test_run.RemoteDeviceTestRun):
  """Run gtests and uirobot tests on a remote device."""

  DEFAULT_RUNNER_PACKAGE = (
      'org.chromium.native_test.NativeTestInstrumentationTestRunner')

  #override
  def TestPackage(self):
    """Return the name of the gtest suite under test."""
    return self._test_instance.suite

  #override
  def _TriggerSetUp(self):
    """Set up the triggering of a test run."""
    logging.info('Triggering test run.')

    if self._env.runner_type:
      logging.warning('Ignoring configured runner_type "%s"',
                      self._env.runner_type)

    if not self._env.runner_package:
      runner_package = self.DEFAULT_RUNNER_PACKAGE
      logging.info('Using default runner package: %s',
                   self.DEFAULT_RUNNER_PACKAGE)
    else:
      runner_package = self._env.runner_package

    dummy_app_path = os.path.join(
        constants.GetOutDirectory(), 'apks', 'remote_device_dummy.apk')
    # The gtest filter is passed to the device via a flags file pushed as a
    # data dependency.
    with tempfile.NamedTemporaryFile(suffix='.flags.txt') as flag_file:
      env_vars = {}
      filter_string = self._test_instance._GenerateDisabledFilterString(None)
      if filter_string:
        flag_file.write('_ --gtest_filter=%s' % filter_string)
        flag_file.flush()
        env_vars[_EXTRA_COMMAND_LINE_FILE] = os.path.basename(flag_file.name)
        self._test_instance._data_deps.append(
            (os.path.abspath(flag_file.name), None))
      self._AmInstrumentTestSetup(
          dummy_app_path, self._test_instance.apk, runner_package,
          environment_variables=env_vars)

  _INSTRUMENTATION_STREAM_LEADER = 'INSTRUMENTATION_STATUS: stream='

  #override
  def _ParseTestResults(self):
    """Parse gtest results out of the instrumentation output stream."""
    logging.info('Parsing results from stdout.')
    results = base_test_result.TestRunResults()
    output = self._results['results']['output'].splitlines()
    # Keep only instrumentation stream lines, stripped of their prefix.
    output = (l[len(self._INSTRUMENTATION_STREAM_LEADER):] for l in output
              if l.startswith(self._INSTRUMENTATION_STREAM_LEADER))
    results_list = self._test_instance.ParseGTestOutput(output)
    results.AddResults(results_list)
    if self._env.only_output_failures:
      logging.info('See logcat for more results information.')
    self._DetectPlatformErrors(results)
    return results
|
#!/usr/bin/env python
"""
================================================
ABElectronics IO Pi | Digital I/O Read and Write Demo
Requires python smbus to be installed
For Python 2 install with: sudo apt-get install python-smbus
For Python 3 install with: sudo apt-get install python3-smbus
run with: python demo_iopireadwrite.py
================================================
This example reads pin 1 of bus 1 on the IO Pi board and sets
pin 1 of bus 2 to match.
The internal pull-up resistors are enabled so the input pin
will read as 1 unless the pin is connected to ground.
Initialise the IOPi device using the default addresses, you will need to
change the addresses if you have changed the jumpers on the IO Pi
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import time
try:
from IOPi import IOPi
except ImportError:
print("Failed to import IOPi from python system path")
print("Importing from pare | nt folder instead")
try:
import sys
sys.path.appe | nd("..")
from IOPi import IOPi
except ImportError:
raise ImportError(
"Failed to import library from parent folder")
def main():
    """Main program function.

    Mirrors input pin 1 of bus 1 onto output pin 1 of bus 2, polling
    every 0.1 seconds forever.
    """
    # create two instances of the IoPi class called iobus1 and iobus2 and set
    # the default i2c addresses
    iobus1 = IOPi(0x20)  # bus 1 will be inputs
    iobus2 = IOPi(0x21)  # bus 2 will be outputs

    # Each bus is divided up two 8 bit ports.  Port 0 controls pins 1 to 8,
    # Port 1 controls pins 9 to 16.

    # We will read the inputs on pin 1 of bus 1 so set port 0 to be inputs and
    # enable the internal pull-up resistors
    iobus1.set_port_direction(0, 0xFF)
    iobus1.set_port_pullups(0, 0xFF)

    # We will write to the output pin 1 on bus 2 so set port 0 to be outputs
    # and turn off the pins on port 0
    iobus2.set_port_direction(0, 0x00)
    iobus2.write_port(0, 0x00)

    while True:
        # read pin 1 on bus 1.  If pin 1 is high set the output on
        # bus 2 pin 1 to high, otherwise set it to low.
        # connect pin 1 on bus 1 to ground to see the output on
        # bus 2 pin 1 change state.
        if iobus1.read_pin(1) == 1:
            iobus2.write_pin(1, 1)
        else:
            iobus2.write_pin(1, 0)
        # wait 0.1 seconds before reading the pins again
        time.sleep(0.1)


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')

# Mail settings
# ------------------------------------------------------------------------------
# Point email at a local debugging SMTP server on port 1025.
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025

# CACHING
# ------------------------------------------------------------------------------
# In-memory cache only; fine for a single development process.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )

# 10.0.2.2 is the host machine as seen from a VirtualBox/Vagrant guest.
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)

DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

# Your local stuff: Below this line define 3rd party library settings
# NOTE(review): the hard-coded fallbacks below are Stripe *test* keys; never
# commit live keys — supply them via environment variables instead.
STRIPE_PUBLIC_KEY = os.environ.get("STRIPE_PUBLIC_KEY", "pk_test_4XMRbU6H6Jf5B2TXmICnvXS7")
STRIPE_SECRET_KEY = os.environ.get("STRIPE_SECRET_KEY", "sk_test_4XMRnH3aMfrhHN1nZO2uzcDE")

DJSTRIPE_PLANS = {
    "monthly": {
        "stripe_plan_id": "pro-monthly",
        "name": "Web App Pro ($24.99/month)",
        "description": "The monthly subscription plan to WebApp",
        "price": 2499,  # $24.99
        "currency": "usd",
        "interval": "month"
    },
    "yearly": {
        "stripe_plan_id": "pro-yearly",
        "name": "Web App Pro ($199/year)",
        "description": "The annual subscription plan to WebApp",
        "price": 19900,  # $199.00
        "currency": "usd",
        "interval": "year"
    }
}
|
l encoded string."""
try:
if type(value) is unicode:
base64.urlsafe_b64decode(value.encode('ascii'))
else:
base64.urlsafe_b64decode(value)
except TypeError:
raise ValidationError(
'{method} {path}: expected base64url but received {value}'.format(
method=method, path=path, value=value))
  def _ValidateDate(self, method, path, value):
    """Validates an RFC3339 date; raises ValidationError on mismatch."""
    if not self.DATE_REGEX.match(value):
      raise ValidationError(
          '{method} {path}: expected RFC3339 date, but received {value}'.format(
              method=method, path=path, value=value))
  def _ValidateDateTime(self, method, path, value):
    """Validates an RFC3339 timestamp; raises ValidationError on mismatch."""
    if not self.DATE_TIME_REGEX.match(value):
      raise ValidationError(
          '{method} {path}: expected RFC3339 date, but received {value}'.format(
              method=method, path=path, value=value))
  def _ValidateInt64(self, method, path, value):
    """Validates an int64 value MIN_INT64 <= value <= MAX_INT64.

    Raises ValidationError when the value does not parse as an integer or
    falls outside the signed 64-bit range.
    """
    try:
      long_value = long(value)
    except ValueError:
      raise ValidationError(
          '{method} {path}: expected int64 but received {value}'.format(
              method=method, path=path, value=value))
    if not self.MIN_INT64 <= long_value <= self.MAX_INT64:
      raise ValidationError(
          '{method} {path}: int64 value {value} not in range '
          'MIN_INT64..MAX_INT64'.format(
              method=method, path=path, value=value))
def _ValidateUInt64(self, method, path, value):
"""Validates an uint64 value 0 <= value <= MAX_INT64."""
try:
long_value = long(value)
except ValueError:
raise ValidationError(
'{method} {path}: expected int64 but received {value}'.format(
method=method, path=path, value=value))
if not self.MIN_UINT64 <= long_value <= self.MAX_UINT64:
raise ValidationError(
'{method} {path}: int64 value {value} not in range '
'MIN_INT64..MAX_INT64'.format(
method=method, path=path, value=value))
  def Validate(self, method, path, value):
    """Validate *value* as a string of this type's declared format.

    Raises ValidationError when value is not a string or fails the
    format-specific check (byte, date, date-time, int64, uint64).
    """
    if not isinstance(value, basestring):
      raise ValidationError(
          '{method} {path}: expected string, but received {value}'.format(
              method=method, path=path, value=value))
    if self._format == 'byte':
      self._ValidateByte(method, path, value)
    elif self._format == 'date':
      self._ValidateDate(method, path, value)
    elif self._format == 'date-time':
      self._ValidateDateTime(method, path, value)
    elif self._format == 'int64':
      self._ValidateInt64(method, path, value)
    elif self._format == 'uint64':
      self._ValidateUInt64(method, path, value)
  def ValidateString(self, method, path, value):
    """String-typed values validate the same way as JSON values."""
    self.Validate(method, path, value)
class IntegerType(_ApiType):
  """Represents an integer type in the API type system."""
  __slots__ = ('_format',)

  def __init__(self, value_format):
    # Format string selecting the range check in Validate(), e.g. 'uint32'.
    self._format = value_format

  def Validate(self, method, path, value):
    """Raise ValidationError unless *value* is an int within the format's range."""
    if not isinstance(value, (int, long)):
      raise ValidationError(
          '{method} {path}: expected int32, but received {value}'.format(
              method=method, path=path, value=value))
    if self._format == 'uint32':
      if not 0 <= value <= 4294967295:
        raise ValidationError(
            '{method} {path}: value {value} not in the uint32 range '
            '0 .. 4294967295'.format(method=method, path=path, value=value))
    elif not -2147483648 <= value <= 2147483647:
      raise ValidationError(
          '{method} {path}: value {value} not in the int32 range '
          '-2147483648 .. 2147483647'.format(
              method=method, path=path, value=value))

  def ValidateString(self, method, path, value):
    """Parse *value* as an integer, then validate its range."""
    try:
      integer_value = long(value)
    except ValueError:
      raise ValidationError(
          '{method} {path}: value {value} not an integer'.format(
              method=method, path=path, value=value))
    self.Validate(method, path, integer_value)
class NumberType(_ApiType):
  """Represents a floating point number in the API type system."""
  __slots__ = ('_format',)

  def __init__(self, value_format):
    self._format = value_format

  def Validate(self, method, path, value):
    """Raise ValidationError unless *value* is numeric (int, long or float)."""
    if not isinstance(value, (int, long, float)):
      raise ValidationError(
          '{method} {path}: expected number but received {value}'.format(
              method=method, path=path, value=value))

  def ValidateString(self, method, path, value):
    """Parse *value* as a float, then validate it as a number."""
    try:
      float_value = float(value)
    except ValueError:
      raise ValidationError(
          '{method} {path}: expected number but received {value}'.format(
              method=method, path=path, value=value))
    self.Validate(method, path, float_value)
class ArrayType(_ApiType):
  """Represents an array type; elements validate against the element type."""
  __slots__ = ('_element',)

  def __init__(self, element):
    self._element = element

  def Validate(self, method, path, value):
    """Raise ValidationError unless *value* is a list/tuple of valid elements."""
    if not isinstance(value, (list, tuple)):
      raise ValidationError(
          '{method} {path}: expected array but received {value}'.format(
              method=method, path=path, value=value))
    for index, element in enumerate(value):
      # Element errors report their position, e.g. path[3].
      self._element.Validate(method,
                             '{path}[{index}]'.format(path=path, index=index),
                             element)
class ObjectType(_ApiType):
  """Represents an API object type."""
  __slots__ = ('_name', '_properties', '_additional')

  def __init__(self):
    # Populated later via Define(); allows forward references between types.
    self._name = None
    self._properties = None
    self._additional = None

  def __str__(self):
    return '<{name} {properties}>'.format(
        name=self._name, properties=sorted(self._properties))

  def Define(self, name, properties, additional):
    """Bind the schema: named properties plus optional additional-properties type."""
    self._name = name
    self._properties = dict((object_property.name, object_property)
                            for object_property in properties or [])
    self._additional = additional

  def Validate(self, method, path, value):
    """Raise ValidationError unless *value* is a dict matching the schema."""
    if not isinstance(value, dict):
      raise ValidationError(
          '{method} {path}: expected dict but received {value}'.format(
              method=method, path=path, value=value))
    for property_name, property_value in value.iteritems():
      named_property = self._properties.get(property_name)
      if named_property is None:
        # Unknown key: allowed only when additionalProperties is defined.
        if not self._additional:
          raise ValidationError(
              '{method} {path}: Unexpected property {name} in {value}.'.format(
                  method=method, path=path, name=property_name, value=value))
        self._additional.Validate(
            method,
            '{path}[{name}]'.format(path=path, name=property_name),
            property_value)
      else:
        named_property.type.Validate(
            method,
            '{path}.{name}'.format(path=path, name=property_name),
            property_value)
class Method(object):
"""Represents an API resource collection m | ethod."""
__slots__ = ('_id', '_name', '_path', '_parameters', '_request', '_response')
PARAMET | ER_RE = re.compile('^{(\\w+)}$')
def __init__(self, method_id, name, path, parameters, request, response):
self._id = method_id
self._name = name
self._path = path
self._parameters = parameters
self._request = request
self._response = response
@staticmethod
def _TryStringToJson(string):
try:
return json.loads(string)
# pylint: disable=bare-except
except:
return string
def _ValidateParameters(self, uri):
parsed_uri = urlparse.urlsplit(uri)
parsed_path = urlparse.urlsplit(self._path)
if parsed_uri.scheme != parsed_path.scheme:
raise ValidationError(
'Incompatible URL schemes: {value} / {template}'.format(
value=parsed_uri.scheme, template=parsed_path.scheme))
if parsed_uri.netloc != parsed_path.netloc:
raise ValidationError(
'Incompatible URL netlocs: {value} / {template}'.format(
value=parsed_uri.netloc, template=parsed_path.netloc))
# Validate the path contents
split_url_path = parsed_uri.path.split('/')
split_tem |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.util.contextutil import temporary_dir
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
class AptCompileIntegrationTest(BaseCompileIT):
  """Integration tests for compiling Java annotation processors with Pants.

  Each test compiles a testproject target and inspects the produced
  classfiles / service-info files.
  """

  def test_apt_compile(self):
    # Compiling the processor target must produce its classfile plus a
    # META-INF service registration for javax.annotation.processing.Processor.
    with self.do_test_compile('testprojects/src/java/org/pantsbuild/testproject/annotation/processor',
                              expected_files=['ResourceMappingProcessor.class',
                                              'javax.annotation.processing.Processor']) as found:

      self.assertTrue(
          self.get_only(found, 'ResourceMappingProcessor.class').endswith(
              'org/pantsbuild/testproject/annotation/processor/ResourceMappingProcessor.class'))

      processor_service_files = found['javax.annotation.processing.Processor']
      # There should be only a per-target service info file.
      self.assertEqual(1, len(processor_service_files))
      processor_service_file = list(processor_service_files)[0]
      self.assertTrue(processor_service_file.endswith(
          'META-INF/services/javax.annotation.processing.Processor'))
      # The service file's content is the fully-qualified processor class name.
      with open(processor_service_file) as fp:
        self.assertEqual('org.pantsbuild.testproject.annotation.processor.ResourceMappingProcessor',
                         fp.read().strip())

  def test_apt_compile_and_run(self):
    # Compiles Main in a later round than the processor; the processor must
    # run and emit deprecation_report.txt.
    with self.do_test_compile('testprojects/src/java/org/pantsbuild/testproject/annotation/main',
                              expected_files=['Main.class',
                                              'deprecation_report.txt']) as found:

      self.assertTrue(
          self.get_only(found, 'Main.class').endswith(
              'org/pantsbuild/testproject/annotation/main/Main.class'))

      # This is the proof that the ResourceMappingProcessor annotation processor was compiled in a
      # round and then the Main was compiled in a later round with the annotation processor and its
      # service info file from on its compile classpath.
      with open(self.get_only(found, 'deprecation_report.txt')) as fp:
        self.assertIn('org.pantsbuild.testproject.annotation.main.Main', fp.read().splitlines())

  def test_stale_apt_with_deps(self):
    """An annotation processor with a dependency doesn't pollute other annotation processors.

    At one point, when you added an annotation processor, it stayed configured for all subsequent
    compiles.  Meaning that if that annotation processor had a dep that wasn't on the classpath,
    subsequent compiles would fail with missing symbols required by the stale annotation processor.
    """

    # Demonstrate that the annotation processor is working
    with self.do_test_compile(
        'testprojects/src/java/org/pantsbuild/testproject/annotation/processorwithdep/main',
        expected_files=['Main.class', 'Main_HelloWorld.class', 'Main_HelloWorld.java']) as found:
      gen_file = self.get_only(found, 'Main_HelloWorld.java')
      self.assertTrue(gen_file.endswith(
          'org/pantsbuild/testproject/annotation/processorwithdep/main/Main_HelloWorld.java'),
          msg='{} does not match'.format(gen_file))

    # Try to reproduce second compile that fails with missing symbol
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      with temporary_dir(root_dir=self.workdir_root()) as cachedir:
        # This annotation processor has a unique external dependency
        self.assert_success(self.run_test_compile(
            workdir,
            cachedir,
            'testprojects/src/java/org/pantsbuild/testproject/annotation/processorwithdep::'))

        # When we run a second compile with annotation processors, make sure the previous annotation
        # processor doesn't stick around to spoil the compile
        self.assert_success(self.run_test_compile(
            workdir,
            cachedir,
            'testprojects/src/java/org/pantsbuild/testproject/annotation/processor::',
            clean_all=False))
|
"""Tests for tensorflow.ops.tf.gather."""
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class GatherTest(tf.test.TestCase):
  """Covers tf.gather: scalar/2-D indices, gradients, and unknown shapes.

  NOTE(review): uses the legacy graph-mode TF API (test_session, .eval()) —
  this predates TF 2.x eager execution.
  """

  def testScalar1D(self):
    # A scalar index into a vector produces a scalar (empty static shape).
    with self.test_session():
      params = tf.constant([0, 1, 2, 3, 7, 5])
      indices = tf.constant(4)
      gather_t = tf.gather(params, indices)
      gather_val = gather_t.eval()
    self.assertAllEqual(7, gather_val)
    self.assertEqual([], gather_t.get_shape())

  def testScalar2D(self):
    # A scalar index into a matrix selects one row; shape drops the lead axis.
    with self.test_session():
      params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                            [9, 10, 11], [12, 13, 14]])
      indices = tf.constant(2)
      gather_t = tf.gather(params, indices)
      gather_val = gather_t.eval()
    self.assertAllEqual([6, 7, 8], gather_val)
    self.assertEqual([3], gather_t.get_shape())

  def testSimpleTwoD32(self):
    # A vector of (repeatable) indices gathers one row per index.
    with self.test_session():
      params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                            [9, 10, 11], [12, 13, 14]])
      indices = tf.constant([0, 4, 0, 2])
      gather_t = tf.gather(params, indices)
      gather_val = gather_t.eval()
    self.assertAllEqual([[0, 1, 2], [12, 13, 14], [0, 1, 2], [6, 7, 8]],
                        gather_val)
    self.assertEqual([4, 3], gather_t.get_shape())

  def testHigherRank(self):
    np.random.seed(1)
    shape = (4, 3, 2)
    params = np.random.randn(*shape)
    indices = np.random.randint(shape[0], size=15).reshape(3, 5)
    with self.test_session():
      tf_params = tf.constant(params)
      tf_indices = tf.constant(indices)
      gather = tf.gather(tf_params, tf_indices)
      # gather must match numpy fancy indexing, with the index shape
      # prepended to the trailing params dims.
      self.assertAllEqual(params[indices], gather.eval())
      self.assertEqual(indices.shape + params.shape[1:], gather.get_shape())
      # Test gradients
      gather_grad = np.random.randn(*gather.get_shape().as_list())
      params_grad, indices_grad = tf.gradients(gather, [tf_params, tf_indices],
                                               gather_grad)
      # Integer indices are not differentiable, so their gradient is None;
      # the params gradient is a sparse scatter-add (IndexedSlices).
      self.assertEqual(indices_grad, None)
      self.assertEqual(type(params_grad), tf.IndexedSlices)
      params_grad = tf.convert_to_tensor(params_grad)
      correct_params_grad = np.zeros(shape)
      for i, g in zip(indices.ravel(), gather_grad.reshape((15,) + shape[1:])):
        correct_params_grad[i] += g
      self.assertAllEqual(correct_params_grad, params_grad.eval())

  def testUnknownIndices(self):
    # With placeholder indices the static output shape is unknown.
    params = tf.constant([[0, 1, 2]])
    indices = tf.placeholder(tf.int32)
    gather_t = tf.gather(params, indices)
    self.assertEqual(None, gather_t.get_shape())
# Running this module directly executes the test suite.
if __name__ == "__main__":
  tf.test.main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Package facade: re-export the submodules' public names at package level.
# NOTE(review): star imports are deliberate here so callers can import names
# from the package root; verify each submodule controls its exports.
from .envconf import *
from .path import *
__version__ = '0.3.5'
|
'to die; killing') %
{'kill_wait': kill_wait, 'server': server})
# Send SIGKILL to all remaining pids
for pid in set(pids.keys()) - killed_pids:
print(_('Signal %(server)s pid: %(pid)s signal: '
'%(signal)s') % {'server': server,
'pid': pid,
'signal': signal.SIGKILL})
# Send SIGKILL to process group
try:
kill_group(pid, signal.SIGKILL)
except OSError as e:
# PID died before kill_group can take action?
if e.errno != errno.ESRCH:
raise
else:
print(_('Waited %(kill_wait)s seconds for %(server)s '
'to die; giving up') %
{'kill_wait': kill_wait, 'server': server})
return 1
@command
def kill(self, **kwargs):
    """stop a server (no error if not running)
    """
    stop_status = self.stop(**kwargs)
    # Re-check quietly; a failed stop only matters if the server is still up.
    kwargs['quiet'] = True
    if stop_status and not self.status(**kwargs):
        # only exit error if the server is still running
        return stop_status
    return 0
@command
def shutdown(self, **kwargs):
    """allow current requests to finish on supporting servers
    """
    # Graceful stop lets in-flight requests drain before the process exits.
    kwargs['graceful'] = True
    return self.stop(**kwargs)
@command
def restart(self, **kwargs):
    """stops then restarts server
    """
    # Sum both phases so any non-zero phase status is reported.
    return self.stop(**kwargs) + self.start(**kwargs)
@command
def reload(self, **kwargs):
    """graceful shutdown then restart on supporting servers
    """
    kwargs['graceful'] = True
    total = 0
    # Bounce each server individually so one failure doesn't skip the rest.
    for server in self.server_names:
        single = Manager([server])
        total += single.stop(**kwargs)
        total += single.start(**kwargs)
    return total
@command
def force_reload(self, **kwargs):
    """alias for reload
    """
    # Separate @command so 'force-reload' appears in the command listing.
    return self.reload(**kwargs)
def get_command(self, cmd):
    """Find and return the decorated method named like cmd

    :param cmd: the command to get, a string, if not found raises
                UnknownCommandError
    """
    normalized = cmd.lower().replace('-', '_')
    handler = getattr(self, normalized, None)
    # Only methods explicitly marked by the @command decorator are exposed.
    if handler is None or not hasattr(handler, 'publicly_accessible'):
        raise UnknownCommandError(normalized)
    return handler
@classmethod
def list_commands(cls):
    """Get all publicly accessible commands

    :returns: a list of string tuples (cmd, help), the method names who are
              decorated as commands
    """
    commands = []
    for attr_name in dir(cls):
        method = getattr(cls, attr_name)
        if getattr(method, 'publicly_accessible', False):
            # Commands are shown dash-separated, with the docstring as help.
            commands.append((attr_name.replace('_', '-'),
                             method.__doc__.strip()))
    return sorted(commands)
def run_command(self, cmd, **kwargs):
    """Find the named command and run it

    :param cmd: the command name to run
    """
    handler = self.get_command(cmd)
    return handler(**kwargs)
class Server(object):
"""Manage operations on a server or group of servers of similar type
:param server: name of server
"""
def __init__(self, server, run_dir=RUN_DIR):
    """Normalize *server* into canonical name/type/command attributes.

    :param server: name of server, optionally suffixed '.<conf>'
    :param run_dir: directory where pid files are kept
    """
    name = server.lower()
    # Split off an optional trailing '.<conf>' qualifier.
    if '.' in name:
        name, self.conf = name.rsplit('.', 1)
    else:
        self.conf = None
    # Bare type names ('object') become full server names ('object-server').
    if '-' not in name:
        name = '%s-server' % name
    self.server = name
    self.type = name.rsplit('-', 1)[0]
    self.cmd = 'swift-%s' % name
    self.procs = []
    self.run_dir = run_dir
def __str__(self):
    """The canonical server name, e.g. 'object-server'."""
    return self.server
def __repr__(self):
    """Debug form, e.g. Server('object-server')."""
    return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
def __hash__(self):
    """Hash by canonical name so equal servers collide in sets/dicts."""
    return hash(str(self))
def __eq__(self, other):
    """Equal when the other object's ``server`` name matches ours."""
    try:
        return self.server == other.server
    except AttributeError:
        # Objects without a .server attribute never compare equal.
        return False
def __ne__(self, other):
    # NOTE(review): explicit __ne__ suggests Python 2 compatibility, where
    # __ne__ is not derived from __eq__.
    return not self.__eq__(other)
def get_pid_file_name(self, conf_file):
    """Translate conf_file to a corresponding pid_file

    :param conf_file: an conf_file for this server, a string

    :returns: the pid_file for this conf_file
    """
    # Each substitution applies at most once: conf dir -> run dir, generic
    # '<type>-server' -> this concrete server name, then the extension.
    pid_file = conf_file.replace(os.path.normpath(SWIFT_DIR), self.run_dir, 1)
    pid_file = pid_file.replace('%s-server' % self.type, self.server, 1)
    return pid_file.replace('.conf', '.pid', 1)
|
def get_conf_file_name(self, pid_file):
    """Translate pid_file to a corresponding conf_file

    :param pid_file: a pid_file for this server, a string

    :returns: the conf_file for this pid_file
    """
    conf_file = pid_file.replace(
        os.path.normpath(self.run_dir), SWIFT_DIR, 1)
    if self.server not in STANDALONE_SERVERS:
        # Non-standalone pid files carry the concrete server name; conf
        # files use the generic '<type>-server' form instead.
        conf_file = conf_file.replace(self.server, '%s-server' % self.type, 1)
    return conf_file.replace('.pid', '.conf', 1)
def conf_files(self, **kwargs):
    """Get conf files for this server

    :param: number, if supplied will only lookup the nth server

    :returns: list of conf files
    """
    # Standalone servers are searched by their own name; others share the
    # generic '<type>-server' config naming.
    if self.server in STANDALONE_SERVERS:
        server_search = self.server
    else:
        server_search = "%s-server" % self.type
    if self.conf is not None:
        found_conf_files = search_tree(SWIFT_DIR, server_search,
                                       self.conf + '.conf',
                                       dir_ext=self.conf + '.conf.d')
    else:
        found_conf_files = search_tree(SWIFT_DIR, server_search + '*',
                                       '.conf', dir_ext='.conf.d')
    # 'number' is a 1-based index into the discovered configs.
    number = kwargs.get('number')
    if number:
        try:
            conf_files = [found_conf_files[number - 1]]
        except IndexError:
            conf_files = []
    else:
        conf_files = found_conf_files
    if not conf_files:
        # maybe there's a config file(s) out there, but I couldn't find it!
        if not kwargs.get('quiet'):
            if number:
                print(_('Unable to locate config number %(number)s for'
                        ' %(server)s') %
                      {'number': number, 'server': self.server})
            else:
                print(_('Unable to locate config for %s') % self.server)
        if kwargs.get('verbose') and not kwargs.get('quiet'):
            if found_conf_files:
                print(_('Found configs:'))
            for i, conf_file in enumerate(found_conf_files):
                print(' %d) %s' % (i + 1, conf_file))
    return conf_files
def pid_files(self, **kwargs):
"""Get pid files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of pid files
"""
if self.conf is not None:
pid_files = search_tree(self.run_dir, '%s*' % self.server,
exts=[self.conf + '.pid',
self.conf + '.pid.d'])
else:
pid_files = search_tree(self.run_dir, '%s*' % self.server)
if kwargs.get('number', 0):
conf_files = self.conf_files(**kwargs)
# filter pid_files to match the index of numbered conf_file
pid_files = [pid_file for pid_file in pid_files if
self.get_conf_file_name(pid_f |
ate="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
    """
    Initialize & add a `state_class` (`State` subclass) object.

    Exception: `DuplicateStateError` raised if `state_class` was already
    added.
    """
    name = state_class.__name__
    if name in self.states:
        raise DuplicateStateError(name)
    # Instantiate now; the state keeps a back-reference to this machine.
    self.states[name] = state_class(self, self.debug)
def add_states(self, state_classes):
    """
    Add `state_classes` (a list of `State` subclasses).
    """
    for cls in state_classes:
        self.add_state(cls)
def runtime_init(self):
    """
    Initialize `self.states`.
    """
    # Give every registered state a chance to do per-run setup.
    for state in self.states.values():
        state.runtime_init()
def error(self):
    """Report error details."""
    # NOTE(review): Python-2-only print-chevron syntax; 'type' and 'line'
    # shadow builtins here (pre-existing; left unchanged).
    type, value, module, line, function = _exception_data()
    print >>sys.stderr, '%s: %s' % (type, value)
    print >>sys.stderr, 'input line %s' % (self.abs_line_number())
    print >>sys.stderr, ('module %s, line %s, function %s'
                         % (module, line, function))
def attach_observer(self, observer):
    """
    The `observer` parameter is a function or bound method which takes two
    arguments, the source and offset of the current line.
    """
    # Observers are called on every notify_observers(); order is preserved.
    self.observers.append(observer)
def detach_observer(self, observer):
    """Remove a previously attached observer (ValueError if not attached)."""
    self.observers.remove(observer)
def notify_observers(self):
    """Call each observer with (source, offset) info for the current line."""
    for observer in self.observers:
        try:
            info = self.input_lines.info(self.line_offset)
        except IndexError:
            # Offset out of range (e.g. before the first line): report nothing.
            info = (None, None)
        observer(*info)
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
machine, and the retured context is passed on to the | next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, | but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=0):
    """
    Initialize a `State` object; make & add initial transitions.

    Parameters:

    - `statemachine`: the controlling `StateMachine` object.
    - `debug`: a boolean; produce verbose output if true (nonzero).
    """
    self.transition_order = []
    """A list of transition names in search order."""

    self.transitions = {}
    """
    A mapping of transition names to 3-tuples containing
    (compiled_pattern, transition_method, next_state_name). Initialized as
    an instance attribute dynamically (instead of as a class attribute)
    because it may make forward references to patterns and methods in this
    or other classes.
    """

    # NOTE: transitions are built before self.state_machine is assigned;
    # transition construction must not rely on the back-reference.
    self.add_initial_transitions()

    self.state_machine = state_machine
    """A reference to the controlling `StateMachine` object."""

    self.debug = debug
    """Debugging mode on/off."""

    # Default the nested machine to our controller's class, and its kwargs
    # to a single-state machine of this very state.
    if self.nested_sm is None:
        self.nested_sm = self.state_machine.__class__
    if self.nested_sm_kwargs is None:
        self.nested_sm_kwargs = {'state_classes': [self.__class__],
                                 'initial_state': self.__class__.__name__}
def runtime_init(self):
    """
    Initialize this `State` before running the state machine; called from
    `self.state_machine.run()`.
    """
    # Hook for subclasses; base implementation intentionally does nothing.
    pass
def unlink(self):
    """Remove circular references to objects no longer required."""
    # Drop the back-reference so machine and states can be collected.
    self.state_machine = None
def add_initial_transitions(self):
    """Make and add transitions listed in `self.initial_transitions`."""
    if not self.initial_transitions:
        return
    names, transitions = self.make_transitions(self.initial_transitions)
    self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Team:
# J Phani Mahesh <phanimahesh@gmail.com>
# Barneedhar (jokerdino) <barneedhar@ubuntu.com>
# Amith KK <amithkumaran@gmail.com>
# Georgi Karavasilev <motorslav@gmail.com>
# Sam Tran <samvtran@gmail.com>
# Sam Hewitt <hewittsamuel@gmail.com>
# Angel Araya <al.arayaq@gmail.com>
#
# Description:
# A One-stop configuration tool for Unity.
#
# Legal Stuff:
#
# This file is a part of Unity Tweak Tool
#
# Unity Tweak Tool is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# Unity Tweak Tool is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <https://www.gnu.org/licenses/gpl-3.0.txt>
from UnityTweakTool.section.skeletonpage import Section,Tab
from UnityTweakTool.elements.fontbutton import FontButton
from UnityTweakTool.elements.cbox import ComboBox
from UnityTweakTool.elements.spin import SpinButton
from UnityTweakTool.elements.radio import Radio
from UnityTweakTool.elements.checkbox import CheckBox
from UnityTweakTool.section.spaghetti.theme import Themesettings as SpaghettiThemeSettings
from UnityTweakTool.elements.option import Option,HandlerObject
from collections import defaultdict
# Declarative wiring of the "Appearance" section: each element binds a UI
# widget (by builder id) to a GSettings key.
Appearance =Section(ui='appearance.ui',id='nb_themesettings')

#=============== THEME ==========================
#=============== ICONS ==========================
#=============== CURSOR =========================
#=============== FONTS ==========================
font_default= FontButton({
    'id'        : 'font_default',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.desktop.interface',
    'path'      : None,
    'key'       : 'font-name',
    'type'      : 'string'
})
font_document= FontButton({
    'id'        : 'font_document',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.desktop.interface',
    'path'      : None,
    'key'       : 'document-font-name',
    'type'      : 'string'
})
font_monospace= FontButton({
    'id'        : 'font_monospace',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.desktop.interface',
    'path'      : None,
    'key'       : 'monospace-font-name',
    'type'      : 'string'
})
font_window_title= FontButton({
    'id'        : 'font_window_title',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.desktop.wm.preferences',
    'path'      : None,
    'key'       : 'titlebar-font',
    'type'      : 'string'
})
# 'map' translates the string-valued GSettings key to the combobox row index.
cbox_antialiasing=ComboBox({
    'id'        : 'cbox_antialiasing',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.settings-daemon.plugins.xsettings',
    'path'      : None,
    'key'       : 'antialiasing',
    'type'      : 'string',
    'map'       : {'none':0,'grayscale':1,'rgba':2}
})
cbox_hinting=ComboBox({
    'id'        : 'cbox_hinting',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.settings-daemon.plugins.xsettings',
    'path'      : None,
    'key'       : 'hinting',
    'type'      : 'string',
    'map'       : {'none':0,'slight':1,'medium':2,'full':3}
})
spin_textscaling=SpinButton({
    'id'     : 'spin_textscaling',
    'builder': Appearance.builder,
    'schema' : 'org.gnome.desktop.interface',
    'path'   : None,
    'key'    : 'text-scaling-factor',
    'type'   : 'double',
    'min'    : 0.50,
    'max'    : 3.00
})
Fonts=Tab([font_default,
           font_document,
           font_monospace,
           font_window_title,
           cbox_antialiasing,
           cbox_hinting,
           spin_textscaling])

#========== WINDOW CONTROLS =====================
# The two radios are mutually exclusive layouts of the titlebar buttons.
radio_left=Radio({
    'id'        : 'radio_left',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.desktop.wm.preferences',
    'path'      : None,
    'key'       : 'button-layout',
    'type'      : 'string',
    'group'     : 'radio_left',
    'value'     : 'close,minimize,maximize:',
    'dependants': []
})
radio_right=Radio({
    'id'        : 'radio_right',
    'builder'   : Appearance.builder,
    'schema'    : 'org.gnome.desktop.wm.preferences',
    'path'      : None,
    'key'       : 'button-layout',
    'type'      : 'string',
    'group'     : 'radio_right',
    'value'     : ':minimize,maximize,close',
    'dependants': []
})
WindowControls=Tab([radio_left,
                    radio_right])

# Pass in the id of restore defaults button to enable it.
Fonts.enable_restore('b_theme_font_reset')
WindowControls.enable_restore('b_window_control_reset')

# Each page must be added using add_page
Appearance.add_page(Fonts)
# XXX : Disabled since the implementation is inadequate
# Appearance.add_page(WindowControls)
themesettings=HandlerObject(SpaghettiThemeSettings(Appearance.builder))
Appearance.add_page(themesettings)

# After all pages are added, the section needs to be registered to start listening for events
Appearance.register()
|
ted_string = unquoted_stringitem+
dquoted_stringitem = dquoted_stringchar / escapeseq
squoted_stringitem = squoted_stringchar / escapeseq
unquoted_stringitem = unquoted_stringchar / escapeseq
dquoted_stringchar = ~r'[^\r\n"\\]'
squoted_stringchar = ~r"[^\r\n'\\]"
unquoted_stringchar = ~r"[^\s'\"\\]"
escapeseq = ~r"\\."
_ = ~r"\s*"
""")
def urljoin2(base, path, **kwargs):
    """Join *path* onto *base*, treating *base* as a directory.

    Unlike plain urljoin, the base always gets a trailing slash before the
    join (so its last segment is kept), and a trailing slash the join
    produced is stripped when *path* itself has none.
    """
    root = base if base.endswith('/') else base + '/'
    joined = urljoin(root, path, **kwargs)
    if joined.endswith('/') and not path.endswith('/'):
        joined = joined[:-1]
    return joined
def generate_help_text():
    """Return a formatted string listing commands, HTTPie options, and HTTP
    actions.
    """
    def section(summary, cmds):
        # One "name<tab>explanation" row per command under a summary header.
        lines = ['{0}:\n'.format(summary)]
        lines.extend('\t{0:<10}\t{1:<20}\n'.format(cmd, explanation)
                     for cmd, explanation in cmds)
        return ''.join(lines) + '\n'

    return ''.join([
        section('Commands', ROOT_COMMANDS.items()),
        section('Options', OPTION_NAMES.items()),
        section('Actions', ACTIONS.items()),
        section('Headers', HEADER_NAMES.items()),
    ])
class ExecutionVisitor(NodeVisitor):
    """Walks a parsed command tree and applies each node's effect.

    Per-command changes accumulate in ``context_override``; they are merged
    into the persistent session ``context`` only when a ``mutation`` node is
    visited, while previews/actions combine both without persisting.
    """

    def __init__(self, context):
        super(ExecutionVisitor, self).__init__()

        self.context = context

        # Scratch context for this command only, seeded with the current URL.
        self.context_override = Context(context.url)
        self.method = None
        self.tool = None

    def visit_method(self, node, children):
        self.method = node.text
        return node

    def visit_urlpath(self, node, children):
        path = node.text
        self.context_override.url = urljoin2(self.context_override.url, path)
        return node

    def visit_cd(self, node, children):
        # Children layout fixed by the grammar: the path is the 4th element.
        _, _, _, path, _ = children
        self.context_override.url = urljoin2(self.context_override.url, path)
        return node

    def visit_rm(self, node, children):
        children = children[0]
        # children[3] is the flag token: '*', '-h', '-q', '-b' or '-o'.
        kind = children[3].text

        if kind == '*':
            # Clear context
            for target in [self.context.headers,
                           self.context.querystring_params,
                           self.context.body_params,
                           self.context.options]:
                target.clear()
            return node

        name = children[5]
        if kind == '-h':
            target = self.context.headers
        elif kind == '-q':
            target = self.context.querystring_params
        elif kind == '-b':
            target = self.context.body_params
        else:
            assert kind == '-o'
            target = self.context.options

        # KeyError for an unknown name propagates; execute() reports it.
        del target[name]
        return node

    def visit_help(self, node, children):
        click.echo_via_pager(generate_help_text())
        return node

    def visit_exit(self, node, children):
        self.context.should_exit = True
        return node

    def visit_mutkey(self, node, children):
        # Quoted keys arrive as a nested list; the key text sits at index 1.
        if isinstance(children[0], list):
            return children[0][1]
        return children[0]

    def _mutate(self, node, key, op, val):
        # Dispatch on operator: ':' header, '==' querystring, '=' body param.
        # NOTE(review): assumes the grammar only yields these three ops;
        # any other op would leave 'target' unbound.
        if op == ':':
            target = self.context_override.headers
        elif op == '==':
            target = self.context_override.querystring_params
        elif op == '=':
            target = self.context_override.body_params
        target[key] = val
        return node

    def visit_unquoted_mut(self, node, children):
        _, key, op, val, _ = children
        return self._mutate(node, key, op, val)

    def visit_full_squoted_mut(self, node, children):
        _, _, key, op, val, _, _ = children
        return self._mutate(node, key, op, val)

    def visit_full_dquoted_mut(self, node, children):
        _, _, key, op, val, _, _ = children
        return self._mutate(node, key, op, val)

    def visit_value_squoted_mut(self, node, children):
        _, key, op, _, val, _, _ = children
        return self._mutate(node, key, op, val)

    def visit_value_dquoted_mut(self, node, children):
        _, key, op, _, val, _, _ = children
        return self._mutate(node, key, op, val)

    def visit_unquoted_mutkey(self, node, children):
        # Backslash escapes are only meaningful outside quotes.
        return unescape(node.text)

    def visit_squoted_mutkey(self, node, children):
        return node.text

    def visit_dquoted_mutkey(self, node, children):
        return node.text

    def visit_mutop(self, node, children):
        return node.text

    def visit_unquoted_mutval(self, node, children):
        return unescape(node.text)

    def visit_squoted_mutval(self, node, children):
        return node.text

    def visit_dquoted_mutval(self, node, children):
        return node.text

    def visit_flag_option_mut(self, node, children):
        # Value-less option flag, e.g. '--verify'; stored with value None.
        _, key, _ = children
        self.context_override.options[key] = None
        return node

    def visit_flag_optname(self, node, children):
        return node.text

    def visit_value_option_mut(self, node, children):
        _, key, _, val, _ = children
        self.context_override.options[key] = val
        return node

    def visit_value_optname(self, node, children):
        return node.text

    def visit_string(self, node, children):
        return children[0]

    def visit_unquoted_string(self, node, children):
        return unescape(node.text)

    def visit_quoted_string(self, node, children):
        # Strip the surrounding quote characters.
        return node.text[1:-1]

    def visit_tool(self, node, children):
        self.tool = node.text
        return node

    def visit_mutation(self, node, children):
        # A bare mutation command persists the overrides into the session.
        self.context.update(self.context_override)
        return node

    def _final_context(self):
        # Session context + this command's overrides, without persisting.
        context = self.context.copy()
        context.update(self.context_override)
        return context

    def visit_immutation(self, node, children):
        context = self._final_context()
        child_type = children[0].expr_name
        if child_type == 'preview':
            # Print the equivalent httpie/curl command instead of running it.
            if self.tool == 'httpie':
                command = ['http'] + context.httpie_args(self.method,
                                                         quote=True)
            else:
                assert self.tool == 'curl'
                command = ['curl'] + context.curl_args(self.method, quote=True)
            click.echo(' '.join(command))
        elif child_type == 'action':
            # Run httpie in-process, capturing its output for the pager.
            output = BytesIO()
            try:
                env = Environment(stdout=output, is_windows=False)
                httpie_main(context.httpie_args(self.method), env=env)
                content = output.getvalue()
            finally:
                output.close()

            # XXX: Work around a bug of click.echo_via_pager(). When you pass
            # a bytestring to echo_via_pager(), it converts the bytestring with
            # str(b'abc'), which makes it "b'abc'".
            if six.PY2:
                content = unicode(content, 'utf-8')  # noqa
            else:
                content = str(content, 'utf-8')
            click.echo_via_pager(content)
        return node

    def generic_visit(self, node, children):
        # Collapse anonymous single-child nodes so parents see useful values.
        if not node.expr_name and node.children:
            if len(children) == 1:
                return children[0]
            return children
        return node
def execute(command, context):
try:
root = grammar.parse(command)
except ParseError as err:
# TODO: Better error message
part = command[err.pos:err.pos + 10]
click.secho('Syntax error near "%s"' % part, err=True, fg='red')
else:
visitor = ExecutionVisitor(context)
try:
visitor.visit(root)
except VisitationError as err:
exc_class = err.original_class
if exc_class is KeyError:
# XXX: Need to parse VisitationError error message to get the
# original error message as VisitationError doesn't hold the
# original exception object
key = re.search(r"KeyError: u?'(.*)'", str(err)).group(1)
click.secho("Key '%s' not found" % key, err=True,
fg='red')
else:
|
# coding: utf-8
from bernard import (
layers as lyr,
)
from bernard.analytics import (
page_view,
)
from bernard.engine import (
BaseState,
)
from bernard.i18n import (
translate as t,
)
# NOTE(review): '__project_name_camel__' looks like a project-template
# placeholder substituted at generation time -- confirm before renaming.
class __project_name_camel__State(BaseState):
    """
    Root class for __project_name_readable__.

    Here you must implement "error" and "confused" to suit your needs. They
    are the default functions called when something goes wrong. The ERROR and
    CONFUSED texts are defined in `i18n/en/responses.csv`.
    """

    @page_view('/bot/error')
    async def error(self) -> None:
        """
        This happens when something goes wrong (it's the equivalent of the
        HTTP error 500).
        """

        self.send(lyr.Text(t.ERROR))

    @page_view('/bot/confused')
    async def confused(self) -> None:
        """
        This is called when the user sends a message that triggers no
        transitions.
        """

        self.send(lyr.Text(t.CONFUSED))

    async def handle(self) -> None:
        # Abstract: every concrete state must implement its own handler.
        raise NotImplementedError
class Hello(__project_name_camel__State):
    """
    Example "Hello" state, to show you how it's done. You can remove it.

    Please note the @page_view decorator that allows to track the viewing of
    this page using the analytics provider set in the configuration. If there
    is no analytics provider, nothing special will happen and the handler
    will be called as usual.
    """

    @page_view('/bot/hello')
    async def handle(self) -> None:
        # Reply with the localized HELLO text from i18n/en/responses.csv.
        self.send(lyr.Text(t.HELLO))
|
_ENV_WHITELIST_VARS, **TEST_ENV_MATCHED_VARS)
TEST_SEPARATED_VARS = dict(TEST_SEPARATED_VARS, **TEST_ENV_VARS)
TEST_ENV_VARS = dict(TEST_ENV_VARS, **TEST_ENV_BASE_VARS)
# Expected results after key conversion (separator split, underscore->dash,
# lower-casing) of the raw environment fixtures above.
TEST_ENV_CONVERTED = {
    "env--var": "result",
    "env--var-2": "second_result",
    "matched-var": "match",
    "whitelisted-var": "whitelist",
}
TEST_ENV_CONVERTED_SEPARATED = {
    "env": {"var": "result", "var-2": "second_result"},
    "matched-var": "match",
    "whitelisted-var": "whitelist",
}
TEST_ENV_UPPERCASE = {
    "ENV__VAR": "result",
    "ENV__VAR_2": "second_result",
    "MATCHED_VAR": "match",
    "WHITELISTED_VAR": "whitelist",
}
# Raw string values and their parse_values=True Python equivalents.
TEST_ENV_TYPED_VARS = {
    "key": "value",
    "int": "123",
    "float": "1.23",
    "complex": "1+2j",
    "list": "['list1', 'list2', {'dict_in_list': 'value'}]",
    "dict": "{'nested_dict': 'nested_value'}",
    "tuple": "(123, 'string')",
    "bool": "True",
    "boolstring": "false",
    "string_with_specials": "Test!@#$%^&*()-_=+[]{};:,<.>/?\\'\"`~",
}  # noqa: E501
TEST_ENV_TYPED_VARS_PARSED = {
    "key": "value",
    "int": 123,
    "float": 1.23,
    "complex": 1 + 2j,
    "list": ["list1", "list2", {"dict_in_list": "value"}],
    "dict": {"nested_dict": "nested_value"},
    "tuple": (123, "string"),
    "bool": True,
    "boolstring": False,
    "string_with_specials": "Test!@#$%^&*()-_=+[]{};:,<.>/?\\'\"`~",
}  # noqa: E501
# Docker-secrets fixtures: only *_FILE variables should be resolved to the
# secret file's content.
TEST_ENV_DOCKER_SECRETS = {"MY_EXAMPLE_SECRET_FILE": "/run/secrets/my_example_secret"}
TEST_ENV_DOCKER_SECRETS_INVALID_POSTFIX = {
    "MY_EXAMPLE_SECRET": "/run/secrets/my_example_secret"
}
TEST_DOCKER_SECRET_CONTENT = "mysecret"
TEST_DOCKER_SECRETS_RESULT = {"MY_EXAMPLE_SECRET": TEST_DOCKER_SECRET_CONTENT}
# Constructor arguments used by the option-passing tests below.
TEST_SEPARATOR = "__"
TEST_MATCH = r"^matched"
TEST_WHITELIST = ["whitelisted_var", "whitelist2"]
TEST_PARSE_VALUES = True
TEST_TO_LOWER = True
TEST_CONVERT_UNDERSCORES = True
TEST_DOCKER_SECRETS = list(TEST_ENV_DOCKER_SECRETS.keys())
TEST_DOCKER_SECRETS_INVALID_POSTFIX = ["MY_EXAMPLE_SECRET"]
TEST_DOCKER_SECRETS_PATH = str(list(TEST_DOCKER_SECRETS_RESULT.values())[0])
MOCK_OPEN_FUNCTION = "builtins.open"
def throw_ioerror(*args, **kwargs):
    """Side-effect stub for mocks: unconditionally raise ``IOError("test")``."""
    raise IOError("test")
class TestEnv(TestCase):
    """Unit tests for ``pconf.store.env.Env`` covering each constructor option
    (separator, match, whitelist, parse_values, to_lower, convert_underscores,
    docker_secrets) individually and in combination."""

    def test_default_params(self):
        """All options default to disabled/None."""
        env_store = Env()
        self.assertEqual(env_store.separator, None)
        self.assertEqual(env_store.match, None)
        self.assertEqual(env_store.whitelist, None)
        self.assertEqual(env_store.parse_values, False)
        self.assertEqual(env_store.to_lower, False)
        self.assertEqual(env_store.convert_underscores, False)

    def test_optional_params(self):
        """The constructor stores every option it is given."""
        env_store = Env(
            separator=TEST_SEPARATOR,
            match=TEST_MATCH,
            whitelist=TEST_WHITELIST,
            parse_values=TEST_PARSE_VALUES,
            to_lower=TEST_TO_LOWER,
            convert_underscores=TEST_CONVERT_UNDERSCORES,
        )
        self.assertEqual(env_store.separator, TEST_SEPARATOR)
        self.assertEqual(env_store.match, TEST_MATCH)
        self.assertEqual(env_store.whitelist, TEST_WHITELIST)
        self.assertEqual(env_store.parse_values, TEST_PARSE_VALUES)
        self.assertEqual(env_store.to_lower, TEST_TO_LOWER)
        self.assertEqual(env_store.convert_underscores, TEST_CONVERT_UNDERSCORES)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_get_all_vars(self):
        """Without options, get() returns the whole environment."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env()
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_get_idempotent(self):
        """get() keeps returning the snapshot taken at first call even if the
        environment changes afterwards."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env()
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_VARS)
        self.assertIsInstance(result, dict)
        pconf.store.env.os.environ = TEST_ENV_BASE_VARS
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_whitelist(self):
        """Only whitelisted variable names are returned."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env(whitelist=TEST_WHITELIST)
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_WHITELIST_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_match(self):
        """Only variables matching the regex are returned."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env(match=TEST_MATCH)
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_MATCHED_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_whitelist_and_match(self):
        """Whitelist and match results are unioned, not intersected."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env(match=TEST_MATCH, whitelist=TEST_WHITELIST)
        result = env_store.get()
        self.assertEqual(result, dict(TEST_ENV_MATCHED_VARS, **TEST_ENV_WHITELIST_VARS))
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_separator(self):
        """Separator splits names into nested dictionaries."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env(separator=TEST_SEPARATOR)
        result = env_store.get()
        self.assertEqual(result, TEST_SEPARATED_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_parse_values(self):
        """parse_values converts string values to their Python types."""
        pconf.store.env.os.environ = TEST_ENV_TYPED_VARS
        env_store = Env(parse_values=TEST_PARSE_VALUES)
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_TYPED_VARS_PARSED)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_lowercase_conversion(self):
        """to_lower lower-cases all variable names."""
        pconf.store.env.os.environ = TEST_ENV_UPPERCASE
        env_store = Env(to_lower=TEST_TO_LOWER)
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_lowercase_and_separator(self):
        """to_lower composes with separator splitting."""
        pconf.store.env.os.environ = TEST_ENV_UPPERCASE
        env_store = Env(separator=TEST_SEPARATOR, to_lower=TEST_TO_LOWER)
        result = env_store.get()
        self.assertEqual(result, TEST_SEPARATED_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_convert_underscore_replacement(self):
        """convert_underscores turns single underscores into dashes."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env(convert_underscores=TEST_CONVERT_UNDERSCORES)
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_CONVERTED)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_convert_underscore_and_separator(self):
        """Separator splitting happens alongside underscore conversion."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        env_store = Env(
            separator=TEST_SEPARATOR, convert_underscores=TEST_CONVERT_UNDERSCORES
        )
        result = env_store.get()
        self.assertEqual(result, TEST_ENV_CONVERTED_SEPARATED)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    def test_parse_and_split_order(self):
        """Regression test: parsing combined with splitting must not raise."""
        pconf.store.env.os.environ = TEST_ENV_VARS
        try:
            env_store = Env(separator=TEST_SEPARATOR, parse_values=TEST_PARSE_VALUES)
        except AttributeError:
            self.fail("Parsing environment variables raised AttributeError")
        result = env_store.get()
        self.assertEqual(result, TEST_SEPARATED_VARS)
        self.assertIsInstance(result, dict)

    @patch("pconf.store.env.os", new=MagicMock())
    @patch(MOCK_OPEN_FUNCTION, mock_open(read_data=TEST_DOCKER_SECRETS_PATH))
    def test_docker_secrets(self):
        """"*_FILE" secret variables are replaced by the file's content."""
        pconf.store.env.os.environ = TEST_ENV_DOCKER_SECRETS
        env_store = Env(docker_secrets=TEST_DOCKER_SECRETS)
        result = env_store.get()
        self.assertEqual(list(result.keys()), list(TEST_DOCKER_SECRETS_RESULT.keys()))
        self.assertEqual(result, TEST_DOCKER_SECRETS_RESULT)
        self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def |
# -*- coding: utf-8 -*-
import datetime, time, csv, os
from utils.db import SqliteDB
from utils.rwlogging import log
from utils.rwlogging import strategyLogger as logs
from trader import Trader
from indicator import ma, macd, bolling, rsi, kdj
from strategy.pool import StrategyPool
# Global high-water mark; declared ``global`` inside doBollingTrade but never
# actually written in this file chunk.
highest = 0
def runStrategy(prices):
    """Grid-search Bolling-band parameters (period 2..39, deviate 0..5 in
    0.1 steps), simulating one trade sequence per combination and reporting
    the best strategies via the pool."""
    logs.info('STRATEGY,BUY TIMES, SELL TIMES, FINAL EQUITY')
    #prices = SqliteDB().getAllPrices(table)
    ps = [p['close'] for p in prices]
    pool = StrategyPool(100)  # keeps the top strategies (capacity 100)

    #doBollingTrade(pool, prices, ps, 12, 2.4)
    #pool.showStrategies()
    #return
    for i in range(2, 40):
        j = 0
        log.debug(i)
        while j <= 5:
            doBollingTrade(pool, prices, ps, i, j)
            j += 0.1  # NOTE: float accumulation, so steps are approximate
    pool.showStrategies()
def doBollingTrade(pool, prices, ps, period, deviate):
    """Simulate one Bolling-band strategy over the price series and hand the
    finished trader to ``pool`` for ranking.

    A "cross" means the previous close is on one side of a band and the
    current close on the other.  ``t.bsflag`` appears to track position state
    (1 long, -1 short) -- defined in Trader, outside this chunk.
    """
    global highest  # declared but never assigned in this function
    sname = 'BOLLING_' + str(period) + '_' + str(deviate)
    bollings = bolling.calc_bolling(prices, period, deviate)
    t = Trader(sname)

    for i in range(period, len(prices)):
        # Close crosses down through the lower band while not long: buy.
        if ps[i-1] > bollings['lower'][i-1] and ps[i] < bollings['lower'][i] and t.bsflag < 1:
            notes = 'LAST p: ' + str(ps[i - 1]) + ';boll lower: ' + str(bollings['lower'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll lower: ' + str(bollings['lower'][i])
            t.buy(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
        # Close crosses up through the mean while long.
        # NOTE(review): this calls t.buy, while the mirror branch below (mean
        # cross-down while short) calls t.sell.  If the mean cross is meant to
        # CLOSE the long position this should presumably be t.sell -- confirm
        # against Trader's bsflag semantics before changing anything.
        if ps[i-1] < bollings['mean'][i-1] and ps[i] >= bollings['mean'][i] and t.bsflag == 1:
            notes = 'LAST p: ' + str(ps[i - 1]) + ';boll mean: ' + str(bollings['mean'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll mean: ' + str(bollings['mean'][i])
            t.buy(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
        # Close crosses up through the upper band while not short: sell.
        if ps[i-1] < bollings['upper'][i-1] and ps[i] > bollings['upper'][i] and t.bsflag > -1:
            notes = 'LAST p: ' + str(ps[i - 1]) + ';boll upper: ' + str(bollings['upper'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll upper: ' + str(bollings['upper'][i])
            t.sell(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
        # Close crosses down through the mean while short (see NOTE above).
        if ps[i-1] > bollings['mean'][i-1] and ps[i] <= bollings['mean'][i] and t.bsflag == -1:
            notes = 'LAST p: ' + str(ps[i - 1]) + ';boll mean: ' + str(bollings['mean'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll mean: ' + str(bollings['mean'][i])
            t.sell(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
        t.show(prices[i]['date'], prices[i]['time'], prices[i]['rmb'])
    pool.estimate(t)
|
#!/usr/bin/env python
"""
This module contains the :class:`.DataType` class and its subclasses. These
types define how data should be converted during the creation of a
:class:`.Table`.
A :class:`TypeTester` class is also included which can be used to infer data
types from column data.
"""
from copy import copy
from agate.data_types.base import DEFAULT_NULL_VALUES, DataType # noqa
from agate.data_types.boolean import Boolean
from agate.data_types.date import Date
from agate.data_types.date_time import DateTime
from agate.data_types.number import Number
from agate.data_types.text import Text
from agate.data_types.time_delta import TimeDelta
from agate.exceptions import CastError # noqa
class TypeTester(object):
    """
    Infer data types for the columns in a given set of data.

    :param force:
        A dictionary where each key is a column name and each value is a
        :class:`.DataType` instance that overrides inference.
    :param limit:
        An optional limit on how many rows to evaluate before selecting the
        most likely type. Note that applying a limit may mean errors arise when
        the data is cast--if the guess is proved incorrect in further rows of
        data.
    :param types:
        A sequence of possible types to test against. This can be used to
        specify what data formats you want to test against. For instance, you
        may want to exclude :class:`TimeDelta` from testing. It can also be
        used to pass options such as ``locale`` to :class:`.Number` or
        ``cast_nulls`` to :class:`.Text`. Take care in specifying the order of
        the list. It is the order they are tested in. :class:`.Text` should
        always be last.
    """
    def __init__(self, force=None, limit=None, types=None):
        # Bug fix: ``force`` previously defaulted to a *shared* mutable ``{}``
        # (classic mutable-default pitfall); a None sentinel gives every
        # instance its own dictionary.
        self._force = force if force is not None else {}
        self._limit = limit

        if types:
            self._possible_types = types
        else:
            # In order of preference
            self._possible_types = [
                Boolean(),
                Number(),
                TimeDelta(),
                Date(),
                DateTime(),
                Text()
            ]

    def run(self, rows, column_names):
        """
        Apply type inference to the provided data and return an array of
        column types.

        :param rows:
            The data as a sequence of any sequences: tuples, lists, etc.
        """
        num_columns = len(column_names)
        # One set of still-viable type hypotheses per column.
        hypotheses = [set(self._possible_types) for i in range(num_columns)]
        # Forced columns never take part in inference.
        force_indices = [column_names.index(name) for name in self._force.keys()]

        if self._limit:
            sample_rows = rows[:self._limit]
        elif self._limit == 0:
            # A limit of zero means "don't infer at all": everything is Text.
            text = Text()
            return tuple([text] * num_columns)
        else:
            sample_rows = rows

        for row in sample_rows:
            for i in range(num_columns):
                if i in force_indices:
                    continue

                h = hypotheses[i]

                if len(h) == 1:
                    # Only one candidate left; nothing more to eliminate.
                    continue

                # Iterate a copy: we mutate ``h`` while testing.
                for column_type in copy(h):
                    if len(row) > i and not column_type.test(row[i]):
                        h.remove(column_type)

        column_types = []

        for i in range(num_columns):
            if i in force_indices:
                column_types.append(self._force[column_names[i]])
                continue

            h = hypotheses[i]

            # Select in preferred order
            for t in self._possible_types:
                if t in h:
                    column_types.append(t)
                    break

        return tuple(column_types)
|
ion.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script creates a simple experiment to compute the object classification
accuracy of an L2-L4-L6 network using objects from the YCB dataset and the "Thing" sensor
"""
import glob
import json
import logging
import os
import random
from collections import defaultdict, OrderedDict
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from nupic.encoders import ScalarEncoder
from htmresearch.frameworks.location.location_network_creation import L246aNetwork
from htmresearch.support.expsuite import PyExperimentSuite
from htmresearch.support.register_regions import registerAllResearchRegions
logging.basicConfig(level=logging.ERROR)
def loadThingData(dataDir="data", n=150, w=11):
    """
    Load Thing sensation data. There is one file per object, each row contains
    one feature/location pair. The format is as follows:
      [(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
                                           [list of active bits of feature]]
    The content before "=>" is the true 3D location / sensation.
    We ignore the encoded values after "=>" and use :class:`ScalarEncoder` to
    encode the sensation in a way that is compatible with the experiment network.

    :param dataDir: Directory (relative to this script) containing the data
    :type dataDir: str
    :param n: The number of bits in the feature SDR. Usually L4 column count
    :type n: int
    :param w: Number of 'on' bits in the feature SDR. Usually L4 sample size
    :type w: int
    :return: Dictionary mapping objects to sensations that can be used directly
             by class L246aNetwork's 'infer' and 'learn' methods
    :rtype: dict[str,list]
    """
    objects = defaultdict(list)

    # Thing features are scalar values ranging from 1-25 inclusive
    encoder = ScalarEncoder(n=n, w=w, minval=1, maxval=25, forced=True)

    # Resolve ``dataDir`` relative to this script's own directory.
    dataPath = os.path.dirname(os.path.realpath(__file__))
    dataPath = os.path.join(dataPath, dataDir)
    objFiles = glob.glob1(dataPath, "*.log")

    for filename in objFiles:
        # The object's name is the log file's base name.
        obj, _ = os.path.splitext(filename)

        # Read raw sensations from log file. Ignore SDRs after "=>"
        sensations = []
        with open(os.path.join(dataPath, filename)) as f:
            for line in f.readlines():
                # Parse raw location/feature values.
                # NOTE: str.translate(None, chars) is Python 2 syntax -- it
                # deletes the bracket/comma/paren characters.
                line = line.split("=>")[0].translate(None, "[,]()")
                locationStr, featureStr = line.split("/")
                location = map(float, locationStr.split())  # py2: a list
                feature = encoder.encode(int(featureStr)).nonzero()[0].tolist()
                sensations.append((location, feature))

        # Assume single column
        objects[obj] = [sensations]

    return objects
class L2L4L6aExperiment(PyExperimentSuite):
    """
    Compute the object classification accuracy of an L2-L4-L6 network using
    objects from the YCB dataset and a single column "Thing" sensor.
    """

    def reset(self, params, repetition):
        """
        Take the steps necessary to reset the experiment before each repetition:
          - Make sure random seed is different for each repetition
          - Create the L2-L4-L6a network
          - Load objects used by the experiment
          - Learn all objects used by the experiment
        """
        print params["name"], ":", repetition  # Python 2 print statement

        self.debug = params.get("debug", False)
        self.numLearningPoints = params["num_learning_points"]
        self.numOfSensations = params["num_sensations"]

        # Layer parameters are stored as bare "key: value" text in the config;
        # wrapping in braces makes them valid JSON.
        L2Params = json.loads('{' + params["l2_params"] + '}')
        L4Params = json.loads('{' + params["l4_params"] + '}')
        L6aParams = json.loads('{' + params["l6a_params"] + '}')
        self.sdrSize = L2Params["sdrSize"]

        # Make sure random seed is different for each repetition
        seed = params.get("seed", 42)
        np.random.seed(seed + repetition)
        random.seed(seed + repetition)
        L2Params["seed"] = seed + repetition
        L4Params["seed"] = seed + repetition
        L6aParams["seed"] = seed + repetition

        # Configure L6a params: one scale per module, module orientations
        # spread evenly across ``angle`` (integer division -- Python 2).
        numModules = params["num_modules"]
        L6aParams["scale"] = [params["scale"]] * numModules
        angle = params["angle"] / numModules
        orientation = range(angle / 2, angle * numModules, angle)
        L6aParams["orientation"] = np.radians(orientation).tolist()
        L6aParams["cellsPerAxis"] = params["cells_per_axis"]

        # Create single column L2-L4-L6a network
        self.network = L246aNetwork(numColumns=1, L2Params=L2Params,
                                    L4Params=L4Params, L6aParams=L6aParams,
                                    repeat=self.numLearningPoints,
                                    logCalls=self.debug)

        # Load Thing Objects
        sampleSize = L4Params["sampleSize"]
        columnCount = L4Params["columnCount"]
        # Make sure w is odd per encoder requirement
        sampleSize = sampleSize if sampleSize % 2 != 0 else sampleSize + 1
        self.objects = loadThingData(dataDir=params["data_path"],
                                     w=sampleSize, n=columnCount)

        # Number of iterations must match the number of objects. This will
        # allow us to execute one iteration per object and use the "iteration"
        # parameter as the object index
        assert params["iterations"] == len(self.objects)

        # Learn objects
        self.network.learn(self.objects)

    def iterate(self, params, repetition, iteration):
        """
        For each iteration try to infer the object represented by the
        'iteration' parameter, returning whether or not the object was
        unambiguously classified.

        :param params: Specific parameters for this iteration. See
            'experiments.cfg' for the list of parameters
        :param repetition: Current repetition
        :param iteration: Use the iteration to select the object to infer
        :return: Whether or not the object was classified
        """
        objname, sensations = self.objects.items()[iteration]  # py2: a list

        # Select sensations to infer
        np.random.shuffle(sensations[0])
        sensations = [sensations[0][:self.numOfSensations]]

        self.network.sendReset()

        # Collect all statistics for every inference.
        # See L246aNetwork._updateInferenceStats
        stats = defaultdict(list)
        self.network.infer(sensations=sensations, stats=stats, objname=objname)
        stats.update({"name": objname})
        return stats
def plotAccuracy(suite, name):
    """
    Plots classification accuracy (left axis) and mean/max sensation counts
    (right axis) against the cells-per-axis parameter, and saves the figure
    to "<path>/<name>.pdf".
    """
    path = suite.cfgparser.get(name, "path")
    path = os.path.join(path, name)

    accuracy = defaultdict(list)
    sensations = defaultdict(list)
    for exp in suite.get_exps(path=path):
        params = suite.get_params(exp)
        maxTouches = params["num_sensations"]
        cells = params["cells_per_axis"]
        # One history row per object; the object counts as classified when any
        # of its sensations classified it correctly.
        res = suite.get_history(exp, 0, "Correct classification")
        classified = [any(x) for x in res]
        accuracy[cells] = float(sum(classified)) / float(len(classified))
        # ``np.argmax(x)`` is the first correct sensation; the ``or`` maps a 0
        # result (first entry correct, or no entry correct) to maxTouches.
        touches = [np.argmax(x) or maxTouches for x in res]
        sensations[cells] = [np.mean(touches), np.max(touches)]

    plt.title("Classification Accuracy")

    # Accuracy curve, sorted by cells-per-axis.
    accuracy = OrderedDict(sorted(accuracy.items(), key=lambda t: t[0]))
    fig, ax1 = plt.subplots()
    ax1.plot(accuracy.keys(), accuracy.values(), "b")
    ax1.set_xlabel("Cells per axis")
    ax1.set_ylabel("Accuracy", color="b")
    ax1.tick_params("y", colors="b")

    # Mean/max sensation counts share the x axis on a twin y axis.
    sensations = OrderedDict(sorted(sensations.items(), key=lambda t: t[0]))
    ax2 = ax1.twinx()
    ax2.set_prop_cycle(linestyle=["-", "--"])
    ax2.plot(sensations.keys(), sensations.values(), "r")
    ax2.set_ylabel("Sensations", color="r")
    ax2.tick_params("y", colors="r")
    ax2.legend(("Mean", "Max"))

    # save
    path = suite.cfgparser.get(name, "path")
    plotPath = os.path.join(path, "{}.pdf".format(name))
    plt.savefig(plotPath)
    plt.close()
if __name__ == "__main__":
registerAllResearchRegions()
suite = L2L4L6aExperiment()
suite.start()
experiments = suite.options.experiments
if exper |
te(
self._working_set(self._member_id_tuples()).difference(_iter_id(iterable)))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
    def difference_update(self, iterable):
        """Remove, in place, every member that also appears in *iterable*."""
        self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._member_id_tuples()).intersection(_iter_id(iterable)))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
    def intersection_update(self, iterable):
        """Keep, in place, only members that also appear in *iterable*."""
        self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_ | difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._member_id_tuples()).symmetric_difference(_iter_id(iterable)))
return result
    def _member_id_tuples(self):
        """Yield ``(id(member), member)`` pairs for the working-set sets
        (Python 2 ``itervalues``)."""
        return ((id(v), v) for v in self._members.itervalues())
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotI | mplemented
return self.symmetric_difference(other)
    def symmetric_difference_update(self, iterable):
        """Replace members, in place, with the symmetric difference."""
        self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference(other)
return self
    def copy(self):
        """Return a shallow copy holding the same member objects."""
        return type(self)(self._members.itervalues())

    # Support the copy.copy() protocol.
    __copy__ = copy
    def __len__(self):
        """Number of members in the set."""
        return len(self._members)
    def __iter__(self):
        """Iterate over the member objects (Python 2 ``itervalues``)."""
        return self._members.itervalues()
    def __hash__(self):
        # Like the builtin set, an IdentitySet is mutable and so unhashable.
        raise TypeError('set objects are unhashable')
    def __repr__(self):
        """E.g. ``IdentitySet([member1, member2])``."""
        return '%s(%r)' % (type(self).__name__, self._members.values())
class OrderedIdentitySet(IdentitySet):
    """IdentitySet variant that preserves the insertion order of members."""

    class _working_set(OrderedSet):
        # a testing pragma: exempt the OIDS working set from the test suite's
        # "never call the user's __hash__" assertions. this is a big hammer,
        # but it's safe here: IDS operates on (id, instance) tuples in the
        # working set.
        __sa_hash_exempt__ = True

    def __init__(self, iterable=None):
        IdentitySet.__init__(self)
        # An OrderedDict keeps members in insertion order.
        self._members = OrderedDict()
        if iterable:
            for o in iterable:
                self.add(o)
# dict gained the __missing__ hook in Python 2.5; fall back to overriding
# __getitem__ on older interpreters.
if sys.version_info >= (2, 5):
    class PopulateDict(dict):
        """A dict which populates missing values via a creation function.

        Note the creation function takes a key, unlike
        collections.defaultdict.
        """

        def __init__(self, creator):
            self.creator = creator

        def __missing__(self, key):
            self[key] = val = self.creator(key)
            return val
else:
    class PopulateDict(dict):
        """A dict which populates missing values via a creation function."""

        def __init__(self, creator):
            self.creator = creator

        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                self[key] = value = self.creator(key)
                return value
# define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# (Currently plain aliases; kept as named indirections so the underlying
# implementation can be swapped in one place.)
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
    """Return the elements of ``seq`` with duplicates removed, preserving
    the original order.

    :param seq: any iterable.
    :param hashfunc: optional callable producing the de-duplication key for
        each element; by default the element itself is the key.

    Improvements over the original one-liner: the ``seen.__setitem__`` trick
    is unrolled into an explicit loop, and ``hashfunc`` is now called only
    once per element (the original evaluated it twice for every previously
    unseen element).
    """
    seen = {}
    result = []
    for item in seq:
        key = item if hashfunc is None else hashfunc(item)
        if key not in seen:
            seen[key] = True
            result.append(item)
    return result
class UniqueAppender(object):
    """Appends items to a collection ensuring uniqueness.

    Additional appends() of the same object are ignored.  Membership is
    determined by identity (``is a``) not equality (``==``).
    """

    def __init__(self, data, via=None):
        self.data = data
        self._unique = {}
        # Resolve the underlying append operation once, up front.
        if via:
            self._data_appender = getattr(data, via)
        elif hasattr(data, 'append'):
            self._data_appender = data.append
        elif hasattr(data, 'add'):
            self._data_appender = data.add

    def append(self, item):
        """Add *item* unless an object with the same identity was added."""
        key = id(item)
        if key not in self._unique:
            self._data_appender(item)
            self._unique[key] = True

    def __iter__(self):
        """Iterate the underlying collection."""
        return iter(self.data)
def to_list(x, default=None):
    """Coerce *x* to a list: None -> *default*, list/tuple pass through
    unchanged, anything else is wrapped in a single-element list."""
    if x is None:
        return default
    if isinstance(x, (list, tuple)):
        return x
    return [x]
def to_set(x):
    """Coerce *x* to a set: None -> empty set, sets pass through unchanged,
    anything else goes through ``to_list`` first."""
    if x is None:
        return set()
    if isinstance(x, set):
        return x
    return set(to_list(x))
def to_column_set(x):
    """Coerce *x* to a ``column_set``: None -> empty, column_set instances
    pass through unchanged, anything else goes through ``to_list`` first."""
    if x is None:
        return column_set()
    if isinstance(x, column_set):
        return x
    return column_set(to_list(x))
def update_copy(d, _new=None, **kw):
    """Copy the given dict and update the copy with *_new* (if truthy) and
    any keyword arguments; the original dict is left untouched."""
    result = d.copy()
    if _new:
        result.update(_new)
    result.update(**kw)
    return result
def flatten_iterator(x):
    """Given an iterator of which further sub-elements may also be
    iterators, flatten the sub-elements into a single iterator.

    Strings are yielded as atoms, not re-iterated (Python 2: the check
    uses ``basestring``, covering str and unicode).
    """
    for elem in x:
        if not isinstance(elem, basestring) and hasattr(elem, '__iter__'):
            # Recurse into nested iterables.
            for y in flatten_iterator(elem):
                yield y
        else:
            yield elem
class WeakIdentityMapping(weakref.WeakKeyDictionary):
    """A WeakKeyDictionary with an object identity index.

    Adds a .by_id dictionary to a regular WeakKeyDictionary. Trades
    performance during mutation operations for accelerated lookups by id().
    The usual cautions about weak dictionaries and iteration also apply to
    this subclass.
    """
    _none = symbol('none')  # sentinel marking "no default supplied" in pop()

    def __init__(self):
        weakref.WeakKeyDictionary.__init__(self)
        self.by_id = {}        # id(key) -> value mirror of the mapping
        self._weakrefs = {}    # id(key) -> weakref used to clean up by_id

    def __setitem__(self, object, value):
        oid = id(object)
        self.by_id[oid] = value
        if oid not in self._weakrefs:
            # NOTE(review): self._ref is defined outside this chunk --
            # presumably it builds a weakref whose callback prunes by_id
            # when the key is collected; confirm.
            self._weakrefs[oid] = self._ref(object)
        weakref.WeakKeyDictionary.__setitem__(self, object, value)

    def __delitem__(self, object):
        del self._weakrefs[id(object)]
        del self.by_id[id(object)]
        weakref.WeakKeyDictionary.__delitem__(self, object)

    def setdefault(self, object, default=None):
        value = weakref.WeakKeyDictionary.setdefault(self, object, default)
        oid = id(object)
        if value is default:
            # Key was newly inserted; mirror the default in the id index.
            self.by_id[oid] = default
        if oid not in self._weakrefs:
            self._weakrefs[oid] = self._ref(object)
        return value

    def pop(self, object, default=_none):
        if default is self._none:
            value = weakref.WeakKeyDictionary.pop(self, object)
        else:
            value = weakref.WeakKeyDictionary.pop(self, object, default)
        if id(object) in self.by_id:
            # Only clean up the indexes when the key was actually present.
            del self._weakrefs[id(object)]
            del self.by_id[id(object)]
        return value

    def popitem(self):
        item = weakref.WeakKeyDictionary.popitem(self)
        oid = id(item[0])
        del self._weakrefs[oid]
        del self.by_id[oid]
        return item
def clear(self):
# Py2K
# in 3k, MutableMapping calls popitem()
self._weakrefs.clear()
self.by_id.clear()
# end Py2K
weakref.WeakKeyDict |
from __future__ import print_function, absolute_import
import random
import unittest
from pysmoke import marshal
from pysmoke.smoke import ffi, Type, TypedValue, pystring, smokec, not_implemented, charp, dbg
from pysmoke import QtCore, QtGui
qtcore = QtCore.__binding__
qtgui = QtGui.__binding__
class MarshalTestCase(unittest.TestCase):
    """Exercises pysmoke's QString marshalling and basic QObject property
    round-trips through the Smoke bindings."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_qstring(self):
        """Round-trip a Python string through marshal.QString and back, then
        set/get a QObject name through the same marshalling layer."""
        qstr = marshal.QString.from_py('aqstring')
        print(qstr)
        pstr = marshal.QString.to_py(qstr)
        #dbg()
        self.assertEqual(pstr, 'aqstring')
        # NOTE(review): the collect() presumably guards against the QString
        # buffer being freed prematurely -- confirm the intent.
        import gc; gc.collect()
        qstr2 = marshal.QString.from_py(pstr)
        print('QS:', qstr, pstr, qstr2, marshal.QString.to_py(qstr))
        obj = QtGui.QObject()
        print('obj', obj.__cval__.value.s_voidp)
        obj.setObjectName('my_object')
        self.assertEqual(obj.objectName(), 'my_object')
if __name__ == '__main__':
    # Run the marshalling tests when executed directly.
    unittest.main()
|
# http://pymotw.com/2/socket/multicast.html
# Minimal IPv4 multicast receive/ack loop (Python 2: ``print >>`` syntax).
import socket
import struct
import sys

multicast_group = '224.3.29.71'
server_address = ('', 10000)  # all interfaces, port 10000

# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Bind to the server address
sock.bind(server_address)

# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

# Receive/respond loop
while True:
    print >>sys.stderr, '\nwaiting to receive message'
    data, address = sock.recvfrom(1024)

    print >>sys.stderr, 'received %s bytes from %s' % (len(data), address)
    print >>sys.stderr, data

    print >>sys.stderr, 'sending acknowledgement to', address
    sock.sendto('ack', address)
|
#!/usr/bin/python
'''
This programs is to integrate dog reference genome from chr to a single one.
Author: Hongzhi Luo
'''
import gzip
import glob
import shutil
# Directory holding the per-chromosome reference files (cluster path).
path='/vlsci/LSC0007/shared/canine_alport_syndrome/ref_files/'
#path=''
# Common file-name prefix of the CanFam3.1 reference chunks.
prefix='cfa_ref_CanFam3.1'
def integrate_genome(data_path=None, file_prefix=None):
    """Concatenate the per-chromosome gzipped FASTA files into one gzipped
    reference genome file ``<prefix>.fa.gz``.

    :param data_path: directory (with trailing separator) holding the
        ``<prefix>*.fa.gz`` inputs; defaults to the module-level ``path``.
    :param file_prefix: file-name prefix of the inputs; defaults to the
        module-level ``prefix``.

    Fixes over the original: the output file is opened exactly once (the
    original re-opened and truncated it inside a redundant outer loop that
    also shadowed the ``f`` loop variable), files are closed even on error,
    and the location/prefix are now parameters (defaults preserve the
    original behaviour).
    """
    if data_path is None:
        data_path = path
    if file_prefix is None:
        file_prefix = prefix
    # Sort so chromosome chunks are concatenated in a deterministic order.
    files = sorted(glob.glob(data_path + file_prefix + "*.fa.gz"))
    outfile = gzip.open(data_path + file_prefix + ".fa.gz", 'wb')
    try:
        for fname in files:
            gfile = gzip.open(fname)
            try:
                outfile.write(gfile.read())
            finally:
                gfile.close()
    finally:
        outfile.close()
if __name__ == '__main__':
    # Build the combined reference genome when run as a script.
    integrate_genome()
"""
This example uses OpenGL via Pyglet and draws
a bunch of rectangles on the screen.
"""
import random
import time
import pyglet.gl as GL
import pyglet
import ctypes
# Set up the constants
SCREEN_WIDTH = 700   # window width in pixels
SCREEN_HEIGHT = 500  # window height in pixels
# Default rectangle size; not referenced in this file chunk (sizes below
# are randomized).
RECT_WIDTH = 50
RECT_HEIGHT = 50
class Shape():
    """Position-only record for one rectangle drawn by the demo."""

    def __init__(self):
        self.x, self.y = 0, 0
class VertexBuffer():
    """Holds the GL id and vertex count of one vertex buffer object."""

    def __init__(self, vbo_id, size):
        self.vbo_id = vbo_id  # GLuint handle returned by glGenBuffers
        self.size = size      # number of 2D vertices stored in the buffer
def add_rect(rect_list, x, y, width, height, color):
    """Append the four corner vertices (8 floats) of a width-by-height
    rectangle centred on the origin to ``rect_list``.

    ``x``, ``y`` and ``color`` are accepted but not used by this function.
    """
    half_w = width / 2
    half_h = height / 2
    rect_list += [
        -half_w, -half_h,
        half_w, -half_h,
        half_w, half_h,
        -half_w, half_h,
    ]
def create_vbo_for_rects(v2f):
    """Upload the flat list of 2D vertex coordinates ``v2f`` into a new GL
    vertex buffer object and return it wrapped in a VertexBuffer."""
    vbo_id = GL.GLuint()
    GL.glGenBuffers(1, ctypes.pointer(vbo_id))
    # Convert the Python list to a C float array for glBufferData.
    data2 = (GL.GLfloat*len(v2f))(*v2f)
    GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vbo_id)
    GL.glBufferData(GL.GL_ARRAY_BUFFER, ctypes.sizeof(data2), data2,
                    GL.GL_STATIC_DRAW)
    # Two floats per vertex, hence len // 2 vertices.
    shape = VertexBuffer(vbo_id, len(v2f)//2)
    return shape
def render_rect_filled(shape, x, y):
    """ Render the shape at the right spot.

    NOTE(review): not called anywhere in this file chunk, and it reads
    ``shape.color``, ``shape.width`` and ``shape.height`` which the Shape
    class above never sets -- looks like stale/dead code; confirm before
    relying on it.
    """
    # Set color
    GL.glDisable(GL.GL_BLEND)
    GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2], 255)

    GL.glBindBuffer(GL.GL_ARRAY_BUFFER, shape.vbo_id)
    GL.glVertexPointer(2, GL.GL_FLOAT, 0, 0)

    GL.glLoadIdentity()
    GL.glTranslatef(x + shape.width / 2, y + shape.height / 2, 0)
    GL.glDrawArrays(GL.GL_QUADS, 0, shape.size)
class MyApplication():
    """ Main application class: builds 2000 random rectangles into one VBO
    and draws each with its own translation. """

    def setup(self):
        """ Set up the game and initialize the variables. """

        # Set background to white
        GL.glClearColor(1, 1, 1, 1)

        self.rect_list = []
        self.shape_list = []

        for i in range(2000):
            # Random position and size per rectangle.
            x = random.randrange(0, SCREEN_WIDTH)
            y = random.randrange(0, SCREEN_HEIGHT)
            width = random.randrange(20, 71)
            height = random.randrange(20, 71)
            # Velocities: computed but never stored (animate() is a no-op).
            d_x = random.randrange(-3, 4)
            d_y = random.randrange(-3, 4)
            # NOTE(review): tuple is assembled as (red, blue, green, alpha);
            # RGBA ordering was presumably intended. Unused either way,
            # since add_rect ignores ``color``.
            red = random.randrange(256)
            blue = random.randrange(256)
            green = random.randrange(256)
            alpha = random.randrange(256)
            color = (red, blue, green, alpha)
            shape = Shape()
            shape.x = x
            shape.y = y
            self.shape_list.append(shape)
            # Vertices are built centred on the origin; on_draw translates
            # each rectangle to (shape.x, shape.y).
            add_rect(self.rect_list, 0, 0, width, height, color)

        print("Creating vbo for {} vertices.".format(len(self.rect_list) // 2))
        self.rect_vbo = create_vbo_for_rects(self.rect_list)
        print("VBO {}".format(self.rect_vbo.vbo_id))

    def animate(self, dt):
        """ Move everything (currently a no-op). """
        pass

    def on_draw(self):
        """
        Render the screen and print the frame's elapsed draw time.
        """
        start = time.time()

        float_size = ctypes.sizeof(ctypes.c_float)
        record_len = 10 * float_size

        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glMatrixMode(GL.GL_MODELVIEW)
        GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
        GL.glColor4ub(255, 0, 0, 255)

        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.rect_vbo.vbo_id)
        # NOTE(review): the 10-float stride assumes 10 values per record,
        # but add_rect packs 2 floats per vertex back-to-back -- confirm
        # the intended vertex layout.
        GL.glVertexPointer(2, GL.GL_FLOAT, record_len, 0)

        # One draw call per shape, offset by 8 values per rectangle.
        for i in range(len(self.shape_list)):
            shape = self.shape_list[i]
            GL.glLoadIdentity()
            GL.glTranslatef(shape.x, shape.y, 0)
            GL.glDrawArrays(GL.GL_QUADS, i * 8, 8)

        # GL.glDrawArrays(GL.GL_QUADS,
        #                 0,
        #                 self.rect_vbo.size)

        elapsed = time.time() - start
        print(elapsed)
def main():
    """Create the window, set up the demo application and start pyglet."""
    window = pyglet.window.Window(SCREEN_WIDTH, SCREEN_HEIGHT)
    app = MyApplication()
    app.setup()
    # Schedule ~60 updates per second (animate is currently a no-op).
    pyglet.clock.schedule_interval(app.animate, 1/60)

    @window.event
    def on_draw():
        window.clear()
        app.on_draw()

    pyglet.app.run()

main()
|
logical_resource_id=logical_resource_id
)
    def describe_resources(self, logical_resource_id=None,
                           physical_resource_id=None):
        """Return descriptions of this stack's resources, optionally
        filtered by logical and/or physical resource id."""
        return self.connection.describe_stack_resources(
            stack_name_or_id=self.stack_id,
            logical_resource_id=logical_resource_id,
            physical_resource_id=physical_resource_id
        )
    def list_resources(self, next_token=None):
        """Return one page of this stack's resource summaries; pass the
        previous response's ``next_token`` to fetch subsequent pages."""
        return self.connection.list_stack_resources(
            stack_name_or_id=self.stack_id,
            next_token=next_token
        )
    def update(self):
        """Refresh this object's attributes from a fresh DescribeStacks call.

        :raises ValueError: if the service does not return exactly this
            stack for ``self.stack_id``.
        """
        rs = self.connection.describe_stacks(self.stack_id)
        if len(rs) == 1 and rs[0].stack_id == self.stack_id:
            # Copy every attribute from the freshly fetched stack object.
            self.__dict__.update(rs[0].__dict__)
        else:
            raise ValueError("%s is not a valid Stack ID or Name" %
                             self.stack_id)
    def get_template(self):
        """Return this stack's template via GetTemplate."""
        return self.connection.get_template(stack_name_or_id=self.stack_id)
    def get_policy(self):
        """
        Returns the stack policy for this stack. If it has no policy
        then, a null value is returned.
        """
        return self.connection.get_stack_policy(self.stack_id)
    def set_policy(self, stack_policy_body=None, stack_policy_url=None):
        """
        Sets a stack policy for this stack.

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the stack policy body.
            (For more information, go to ` Prevent Updates to Stack Resources`_
            in the AWS CloudFormation User Guide.)
            You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
            passed, only `StackPolicyBody` is used.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the stack
            policy. The URL must point to a policy (max size: 16KB) located in
            an S3 bucket in the same region as the stack. You must pass
            `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
            `StackPolicyBody` is used.

        :return: the service response from SetStackPolicy.
        """
        return self.connection.set_stack_policy(self.stack_id,
                                                stack_policy_body=stack_policy_body,
                                                stack_policy_url=stack_policy_url)
class StackSummary(object):
    """Summary record for one stack in a ListStacks response, populated by
    the SAX-style startElement/endElement callbacks."""

    # Plain string fields, keyed by their XML tag names.
    _STRING_FIELDS = {
        'StackId': 'stack_id',
        'StackStatus': 'stack_status',
        'StackName': 'stack_name',
        'TemplateDescription': 'template_description',
    }
    # Timestamp fields, parsed via _parse_timestamp.
    _TIME_FIELDS = {
        'CreationTime': 'creation_time',
        'DeletionTime': 'deletion_time',
    }

    def __init__(self, connection=None):
        self.connection = connection
        self.stack_id = None
        self.stack_status = None
        self.stack_name = None
        self.creation_time = None
        self.deletion_time = None
        self.template_description = None

    @staticmethod
    def _parse_timestamp(value):
        # Timestamps arrive with or without fractional seconds.
        try:
            return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
        except ValueError:
            return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')

    def startElement(self, name, attrs, connection):
        # No nested XML structures to handle.
        return None

    def endElement(self, name, value, connection):
        if name in self._STRING_FIELDS:
            setattr(self, self._STRING_FIELDS[name], value)
        elif name in self._TIME_FIELDS:
            setattr(self, self._TIME_FIELDS[name], self._parse_timestamp(value))
        elif name == "member":
            # Container element; nothing to record.
            pass
        else:
            # Unknown tags become plain attributes, matching boto convention.
            setattr(self, name, value)
class Parameter(object):
    """A single stack parameter (key/value), filled in by the SAX parser."""

    def __init__(self, connection=None):
        # BUG FIX: previously the connection argument was silently dropped
        # (self.connection = None), unlike the sibling Output/StackResource
        # classes; keep it for consistency.
        self.connection = connection
        self.key = None
        self.value = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == "ParameterKey":
            self.key = value
        elif name == "ParameterValue":
            self.value = value
        else:
            # unknown elements are kept verbatim as attributes
            setattr(self, name, value)

    def __repr__(self):
        return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
class Output(object):
    """A single stack output (key/value/description), filled in by the
    SAX response parser via startElement/endElement callbacks."""

    # XML element name -> attribute name for the known fields.
    _FIELD_MAP = {'Description': 'description',
                  'OutputKey': 'key',
                  'OutputValue': 'value'}

    def __init__(self, connection=None):
        self.connection = connection
        self.description = None
        self.key = None
        self.value = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Unknown elements fall through and are stored verbatim.
        setattr(self, self._FIELD_MAP.get(name, name), value)

    def __repr__(self):
        return 'Output:"%s"="%s"' % (self.key, self.value)
class Capability(object):
    """A single IAM capability string from a stack description."""

    def __init__(self, connection=None):
        # NOTE: historical quirk preserved — the connection argument is
        # accepted but deliberately not stored.
        self.connection = None
        self.value = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Every element's text is the capability value itself.
        self.value = value

    def __repr__(self):
        return 'Capability:"%s"' % self.value
class Tag(dict):
    """Stack tags as a dict, filled from alternating Key/Value elements."""

    def __init__(self, connection=None):
        dict.__init__(self)
        self.connection = connection
        # Pending key/value pair; committed once both have been seen.
        self._current_key = None
        self._current_value = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == "Key":
            self._current_key = value
        elif name == "Value":
            self._current_value = value
        else:
            setattr(self, name, value)

        # BUG FIX: use explicit None checks instead of truthiness, so a
        # legitimately falsy tag value (e.g. the empty string) is stored
        # rather than silently dropped.
        if self._current_key is not None and self._current_value is not None:
            self[self._current_key] = self._current_value
            self._current_key = None
            self._current_value = None
class NotificationARN(object):
    """A single SNS notification ARN from a stack description."""

    def __init__(self, connection=None):
        # NOTE: historical quirk preserved — the connection argument is
        # accepted but deliberately not stored.
        self.connection = None
        self.value = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Every element's text is the ARN value itself.
        self.value = value

    def __repr__(self):
        return 'NotificationARN:"%s"' % self.value
class StackR | esource(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startEle | ment(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
try:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackResource:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackResourceSummary(object):
    def __init__(self, connection=None):
        """Summary entry from a ListStackResources response."""
        self.connection = connection
        # The following are populated by the SAX parser as response
        # elements are read (parsing methods follow in the class body).
        self.last_updated_time = None
        self.logical_resource_id = None
        self.physical_resource_id = None
        self.resource_status = None
        self.resource_status_reason = None
        self.resource_type = None
|
"""Manual smoke-test client for a local 'persons' REST service.

Sends exactly one request (others are left commented out as ready-made
alternatives), then prints the body, status, reason and elapsed time.
"""
import json
import datetime
import http.client
from time import time  # NOTE(review): unused import — kept as-is

########################################################################################################################
##################################################### ENVIRONMENTS #####################################################
########################################################################################################################
#local
conn = http.client.HTTPConnection("localhost:5000")
#container
# conn = http.client.HTTPConnection("localhost:5000")

########################################################################################################################
######################################################## USERS #########################################################
########################################################################################################################
# All requests send/expect JSON bodies.
headers = {
    'Content-type': 'application/json'
}

#Create person
# create_person_post = {
#     'id': 3,
#     'name': 'Carlos',
#     'email': 'carlos@gmail.com',
#     'login': 'llano',
#     'password': '123456'
# }
# json_data_post = json.dumps(create_person_post)
# conn.request("POST", "/persons/", json_data_post, headers=headers)

#Friends of a person
#conn.request("GET", "/persons/1/friends", headers=headers)

#Friends of the friends of a person
#conn.request("GET", "/persons/0/mayYouKnow", headers=headers)

#Add a new relationship
conn.request("POST", "/persons/person1/3/person2/4", headers=headers)

#Delete a relationship
# conn.request("POST", "/persons/delete/person1/3/person2/4", headers=headers)

# Time only the server round-trip (request was already queued above).
start = datetime.datetime.now()
res = conn.getresponse()
end = datetime.datetime.now()
data = res.read()
elapsed = end - start
print(data.decode("utf-8"))
print("\"" + str(res.status) + "\"")
print("\"" + str(res.reason) + "\"")
print("\"elapsed seconds: " + str(elapsed) + "\"")
|
from pandarus.maps import Map, DuplicateFieldID
from rtree import Rtree
import fiona
import os
import pandarus
import pytest
# Directory holding the test fixture files, relative to this test module.
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
# Vector and raster fixtures used throughout these tests.
grid = os.path.join(dirpath, "grid.geojson")
duplicates = os.path.join(dirpath, "duplicates.geojson")
raster = os.path.join(dirpath, "test_raster_cfs.tif")
countries = os.path.join(dirpath, "test_countries.gpkg")
def test_init():
    """Map records its file path and opens the underlying file."""
    grid_map = Map(grid, 'name')
    assert grid_map.filepath == grid
    assert grid_map.file
def test_raster_error(monkeypatch):
    """Opening a raster file as a vector Map raises AssertionError."""
    with pytest.raises(AssertionError):
        Map(raster, None)
def test_metadata(monkeypatch):
    """Extra keyword args passed to Map become its metadata and are
    forwarded to fiona.open."""
    assert Map(grid, 'name').metadata == {}

    def record_kwargs(filepath, **kwargs):
        # Stand-in for fiona.open that just echoes the kwargs back.
        return kwargs

    monkeypatch.setattr(pandarus.maps, 'check_type', lambda x: 'vector')
    monkeypatch.setattr(pandarus.maps.fiona, 'open', record_kwargs)

    mapped = Map(grid, 'name', foo='bar')
    assert mapped.metadata == {'foo': 'bar'}
    assert mapped.file == {'foo': 'bar'}
def test_get_fieldnames_dictionary():
    """Field-name lookup maps feature index to the 'name' attribute."""
    grid_map = Map(grid, 'name')
    expected = {index: 'grid cell %d' % index for index in range(4)}
    assert grid_map.get_fieldnames_dictionary("name") == expected
def test_get_fieldnames_dictionary_errors():
    """Falsy field names fall back to the default; bad names and
    duplicated IDs raise."""
    grid_map = Map(grid, 'name')
    assert grid_map.get_fieldnames_dictionary()
    assert grid_map.get_fieldnames_dictionary(None)
    assert grid_map.get_fieldnames_dictionary("")
    with pytest.raises(AssertionError):
        grid_map.get_fieldnames_dictionary("bar")
    with pytest.raises(DuplicateFieldID):
        Map(duplicates, 'name').get_fieldnames_dictionary()
def test_properties():
    """Geometry type, hash and CRS are exposed as properties."""
    grid_map = Map(grid, 'name')
    assert grid_map.geometry == 'Polygon'
    assert grid_map.hash
    assert grid_map.crs == '+init=epsg:4326'
def test_magic_methods():
    """Map supports iteration, indexing and len()."""
    grid_map = Map(grid, 'name')
    for position, _feature in enumerate(grid_map):
        pass
    assert position == 3
    cell = {
        'geometry': {
            'type': 'Polygon',
            'coordinates': [[(1.0, 0.0), (1.0, 1.0), (2.0, 1.0), (2.0, 0.0), (1.0, 0.0)]]
        },
        'properties': {'name': 'grid cell 2'},
        'id': '2',
        'type': 'Feature'
    }
    assert grid_map[2] == cell
    assert len(grid_map) == 4
def test_getitem():
    """Indexing returns the raw GeoJSON-style feature dict."""
    print("Supported Fiona drivers:")
    print(fiona.supported_drivers)
    grid_map = Map(grid, 'name')
    cell = {
        'geometry': {
            'type': 'Polygon',
            'coordinates': [[(1.0, 0.0), (1.0, 1.0), (2.0, 1.0), (2.0, 0.0), (1.0, 0.0)]]
        },
        'properties': {'name': 'grid cell 2'},
        'id': '2',
        'type': 'Feature'
    }
    assert grid_map[2] == cell
    # Indexing lazily builds the id -> index lookup table.
    assert hasattr(grid_map, "_index_map")
@pytest.mark.skipif('TRAVIS' in os.environ,
                    reason="No GPKG driver in Travis")
def test_getitem_geopackage():
    """GeoPackage features are indexable too (feature ids start at 1)."""
    print("Supported Fiona drivers:")
    print(fiona.supported_drivers)
    countries_map = Map(countries, 'name')
    first = countries_map[0]
    assert first
    assert first['id'] == '1'
    assert hasattr(countries_map, "_index_map")
def test_rtree():
    """create_rtree_index builds and caches an Rtree spatial index."""
    grid_map = Map(grid, 'name')
    index = grid_map.create_rtree_index()
    assert index == grid_map.rtree_index
    assert isinstance(index, Rtree)
|
#!/usr/bin/env python
import numpy
import datavision
def main():
    """Generate two Gaussian samples and save histogram plots of them."""
    print("\ngenerate two arrays of data")
    a = numpy.random.normal(2, 2, size = 120)
    b = numpy.random.normal(2, 2, size = 120)
    print("\narray 1:\n{array_1}\n\narray 2:\n{array_2}".format(
        array_1 = a,
        array_2 = b
    ))
    filename = "histogram_1.png"
    # BUG FIX: the format string had no {filename} placeholder, so the
    # keyword argument was silently ignored and the target file name was
    # never printed.
    print("\nsave histogram of array 1 to {filename}".format(
        filename = filename
    ))
    datavision.save_histogram_matplotlib(
        a,
        filename = filename,
        color_fill = "#000000"
    )
    filename = "histogram_comparison_1.png"
    # BUG FIX: same missing-placeholder problem as above.
    print("\nsave histogram comparison of array 1 and array 2 to {filename}".format(
        filename = filename
    ))
    datavision.save_histogram_comparison_matplotlib(
        values_1 = a,
        values_2 = b,
        label_1 = "a",
        label_2 = "b",
        normalize = True,
        label_ratio_x = "measurement",
        label_y = "",
        title = "comparison of a and b",
        filename = filename
    )
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import random
import itertools
from functools import partial
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic_manageiq import Table
from cfme.base.ui import BaseLoggedInPage
from cfme.common import SummaryMixin, Taggable
from cfme.containers.provider import Labelable
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import toolbar as tb, match_location,\
PagedTable, CheckboxTable
from .provider import details_page
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import navigator, CFMENavigateStep,\
navigate_to
# Legacy table widgets for the container templates list view.
list_tbl = CheckboxTable(table_locator="//div[@id='list_grid']//table")
paged_tbl = PagedTable(table_locator="//div[@id='list_grid']//table")
# Location matcher bound to this module's controller and page title.
match_page = partial(match_location, controller='container_templates', title='Container Templates')
class Template(Taggable, Labelable, SummaryMixin, Navigatable):
    """A container template entity in the Compute -> Containers UI."""

    PLURAL = 'Templates'

    def __init__(self, name, project_name, provider, appliance=None):
        self.name = name
        self.project_name = project_name
        self.provider = provider
        Navigatable.__init__(self, appliance=appliance)

    def load_details(self, refresh=False):
        """Open this template's Details page, optionally refreshing it."""
        navigate_to(self, 'Details')
        if refresh:
            tb.refresh()

    def click_element(self, *ident):
        """Click an infoblock element identified by *ident."""
        self.load_details(refresh=True)
        return sel.click(details_page.infoblock.element(*ident))

    def get_detail(self, *ident):
        """ Gets details from the details infoblock
        Args:
            *ident: An InfoBlock title, followed by the Key name, e.g. "Relationships", "Images"
        Returns: A string representing the contents of the InfoBlock's value.
        """
        self.load_details(refresh=True)
        return details_page.infoblock.text(*ident)

    @classmethod
    def get_random_instances(cls, provider, count=1, appliance=None):
        """Generating random instances."""
        templates = provider.mgmt.list_template()
        random.shuffle(templates)
        return [cls(template.name, template.project_name, provider,
                    appliance=appliance)
                for template in itertools.islice(templates, count)]
class TemplateAllView(BaseLoggedInPage):
    """Widgetastic view for the 'All Container Templates' page."""
    table = Table(locator="//div[@id='list_grid']//table")

    @property
    def is_displayed(self):
        # True when the page summary identifies the Container Templates page.
        return match_page(summary='Container Templates')
@navigator.register(Template, 'All')
class All(CFMENavigateStep):
    """Navigation step to the Container Templates list page."""
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
    VIEW = TemplateAllView

    def step(self):
        # Top-menu navigation into the templates listing.
        self.prerequisite_view.navigation.select('Compute', 'Containers', 'Container Templates')

    def resetter(self):
        # Reset view and selection
        tb.select("List View")
        # NOTE(review): deferred import — presumably avoids a circular
        # import at module load time; confirm before moving to the top.
        from cfme.web_ui import paginator
        paginator.check_all()
        paginator.uncheck_all()
@navigator.register(Template, 'Details')
class Details(CFMENavigateStep):
    """Navigation step to a single container template's Details page."""
    prerequisite = NavigateToSibling('All')

    def am_i_here(self):
        # Already there when the page summary reads "<name> (Summary)".
        return match_page(summary='{} (Summary)'.format(self.obj.name))

    def step(self):
        tb.select('List View')
        # The row is identified by both name and project, across all pages.
        sel.click(paged_tbl.find_row_by_cell_on_all_pages({'Name': self.obj.name,
                                                           'Project Name': self.obj.project_name}))
|
import os
import sys
import stat
from eyed3 import main, version
from . import RedirectStdStreams
def _initTag(afile):
    """Populate *afile* with a fixed, known reference tag and save it."""
    afile.initTag()
    tag = afile.tag
    tag.artist = "Bad Religion"
    tag.title = "Suffer"
    tag.album = "Suffer"
    tag.release_date = "1988"
    tag.recording_date = "1987"
    tag.track_num = (9, 15)
    tag.save()
def _runPlugin(afile, plugin) -> str:
    """Run the named eyeD3 plugin against *afile*; return its stdout."""
    with RedirectStdStreams() as captured:
        args, _, config = main.parseCommandLine(["-P", plugin, str(afile.path)])
        assert main.main(args, config) == 0
    output = captured.stdout.read().strip()
    print(output)
    return output
def _assertFormat(plugin: str, audio_file, format: str):
    """Assert the plugin's output equals *format* after %-substitution
    of path, version and file size."""
    output = _runPlugin(audio_file, plugin)
    print(output)
    substitutions = {
        "path": audio_file.path,
        "version": version,
        "size_bytes": os.stat(audio_file.path)[stat.ST_SIZE],
    }
    assert output.strip() == (format.strip() % substitutions)
def testJsonPlugin(audiofile):
    """The json plugin emits the full tag as a JSON document."""
    _initTag(audiofile)
    # %-style placeholders are filled in by _assertFormat.
    _assertFormat("json", audiofile, """
{
  "path": "%(path)s",
  "info": {
    "time_secs": 10.68,
    "size_bytes": %(size_bytes)d
  },
  "album": "Suffer",
  "artist": "Bad Religion",
  "best_release_date": "1988",
  "recording_date": "1987",
  "release_date": "1988",
  "title": "Suffer",
  "track_num": {
    "count": 9,
    "total": 15
  },
  "_eyeD3": "%(version)s"
}
""")
def testYamlPlugin(audiofile):
    """The yaml plugin emits the full tag as a YAML document."""
    _initTag(audiofile)
    # Python <= 3.7 serializes track_num as an !!omap with list items;
    # newer versions use a plain mapping.
    omap, omap_list = "", "  "
    if sys.version_info[:2] <= (3, 7):
        omap = " !!omap"
        omap_list = "- "
    _assertFormat("yaml", audiofile, f"""
---
_eyeD3: %(version)s
album: Suffer
artist: Bad Religion
best_release_date: '1988'
info:
  size_bytes: %(size_bytes)d
  time_secs: 10.68
path: %(path)s
recording_date: '1987'
release_date: '1988'
title: Suffer
track_num:{omap}
{omap_list}count: 9
{omap_list}total: 15
""")
|
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import JupyterHubProvider
class JupyterHubTests(OAuth2TestsMixin, TestCase):
    """OAuth2 flow tests for the JupyterHub provider; the actual test
    methods come from OAuth2TestsMixin."""
    provider_id = JupyterHubProvider.id

    def get_mocked_response(self):
        # Canned JupyterHub user-info payload consumed by the mixin's
        # login flow.
        return MockedResponse(200, """
        {
            "kind": "user",
            "name": "abc",
            "admin": false,
            "groups": [],
            "server": null,
            "pending": null,
            "created": "2016-12-06T18:30:50.297567Z",
            "last_activity": "2017-02-07T17:29:36.470236Z",
            "servers": null}
        """)
|
#!/usr/bin/env python3
# Owner & Copyrights: Vance King Saxbe. A.
# Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power
# Dominion Enterprise, Precieux Consulting and other contributors.
# Modelled, Architected and designed by Vance King Saxbe. A. with the geeks
# from GoldSax Consulting and GoldSax Technologies, email @vsaxbe@yahoo.com.
# Development teams from Power Dominion Enterprise, Precieux Consulting.
# Project sponsored by GoldSax Foundation, GoldSax Group and executed by
# GoldSax Manager.
import _thread
import os
import sys
import time
import gc
from src import googlequotemachine
from src import yahooquotemachine
from src import bloombergquotemachine
from src import createtablesgooglefinance
from src import createtablesyahoofinance
from src import createtablesbloomberg
from time import localtime, strftime

# Shared mutable state handed to every quote-machine thread.
start1 = []
sys.setrecursionlimit(1000000)
database = "data/"

# Market name -> "open-close" trading window, e.g. "09:00-17:30".
markettime = {}
with open("conf/MarketTimings.conf") as fillees:
    mlist = fillees.read().splitlines()
    fillees.close()  # NOTE(review): redundant inside `with` — kept as-is
for line in mlist:
    items = line.split(", ")
    key, values = items[0], items[1]
    markettime[key] = values

with open('conf/symbolname.conf') as fille:
    synamelist = fille.read().splitlines()
    fille.close()  # NOTE(review): redundant inside `with` — kept as-is

# Seconds each polling cycle runs before the next restart.
timetorun = 1800
cycle = 1
while("TRUE"):
    # urls.conf is re-read every cycle so edits take effect without restart.
    with open('conf/urls.conf') as openedfile:
        fileaslist = openedfile.read().splitlines()
        openedfile.close()  # NOTE(review): redundant inside `with` — kept as-is
    a_lock = _thread.allocate_lock()
    thr = []
    with a_lock:
        print("locks placed and Market engine is running for the...", cycle)
        for lines in fileaslist:
            # Each line looks like: "url", "g|y|b", "market.db"
            lisj = lines.split('", "')
            mtime = markettime[lisj[2].replace('"','')]
            mktime = mtime.split("-")
            if mktime[1] < mktime[0]:
                # Close time is past midnight: shift it by 24h so the
                # string comparison below stays monotonic.
                righto = mktime[1].split(":")
                close = str(str(int(righto[0])+24)+":"+righto[1])
            else:
                close = mktime[1]
            rightnow = strftime("%H:%M", localtime())
            # strftime("04:00") contains no format directives, so it simply
            # returns the literal string "04:00".
            if rightnow < strftime("04:00"):
                # Before 04:00 local: treat as part of the previous day by
                # shifting the hour by 24 (matches the close-time shift).
                right = rightnow.split(":")
                rightnow = str(str(int(right[0])+24)+":"+right[1])
            if (close > rightnow > mktime[0]):
                print("Market ", lisj[2].replace('.db"',''), " is starting at cycle ", cycle)
                # 'g'/'y'/anything-else selects Google/Yahoo/Bloomberg.
                if lisj[1] =='g':
                    thr.append(_thread.start_new_thread(googlequotemachine.actionking, (a_lock, start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
                elif lisj[1] =='y':
                    thr.append(_thread.start_new_thread(yahooquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
                else:
                    thr.append(_thread.start_new_thread(bloombergquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,) ))
                time.sleep(0.00001)
    print("locks placed and Market engine is running for the....", cycle, " time...with threads", thr )
    # Let the worker threads run for a full cycle, then clean up and loop.
    time.sleep(timetorun)
    gc.collect()
    print("locks released and Market engine is restarting for the...", cycle, " time...")
    cycle = cycle + 1
# email to provide support at vancekingsaxbe@powerdominionenterprise.com,
# businessaffairs@powerdominionenterprise.com. For donations please write
# to fundraising@powerdominionenterprise.com
import numpy as np
from bregman.suite import *
from cjade import cjade
from scipy.optimize import curve_fit
from numpy.linalg.linalg import svd
def cseparate(x, M=None, N=4096, H=1024, W=4096, max_iter=200, pre_emphasis=True, magnitude_only=False, svd_only=False, transpose_spectrum=False):
    """
    complex-valued frequency domain separation by independent components
    using relative phase representation

    inputs:
        x - the audio signal to separate (1 row)
        M - the number of sources to extract [20]
    options:
        N - fft length in samples [4096]
        H - hop size in samples [1024]
        W - window length in samples (fft padded with N-W zeros) [4096]
        max_iter - maximum JADE ICA iterations [200]
        pre_emphasis - apply an exponential spectral pre-emphasis filter [True]
        magnitude_only - whether to use magnitude-only spectrum (real-valued factorization) [False]
        svd_only - whether to use SVD instead of JADE [False]
        transpose_spectrum - whether to transpose the spectrum prior to factorization [False]
    output:
        x_hat - the separated signals (list of M signals)
        x_hat_all - the M separated signals mixed (1 row)

    Copyright (C) 2014 Michael A. Casey, Bregman Media Labs,
    Dartmouth College All Rights Reserved
    """
    def pre_func(x, a, b, c):
        # Exponential-decay model fitted to the mean magnitude spectrum;
        # used for pre-emphasis and its inverse on reconstruction.
        return a * np.exp(-b * x) + c
    M = 20 if M is None else M
    # Reconstruct absolute phase by cumulatively summing relative phase
    # plus the per-frame phase deltas dp.
    phs_rec = lambda rp, dp: (np.angle(rp)+np.tile(np.atleast_2d(dp).T, rp.shape[1])).cumsum(1)
    F = LinearFrequencySpectrum(x, nfft=N, wfft=W, nhop=H)
    U = F._phase_map()  # NOTE(review): U is computed but never used here
    XX = np.absolute(F.STFT)
    if pre_emphasis:
        # Flatten the average spectral envelope by dividing out a fitted
        # exponential curve (inverted again before resynthesis below).
        xx = np.arange(F.X.shape[0])
        yy = XX.mean(1)
        popt, pcov = curve_fit(pre_func, xx, yy)
        XX = (XX.T * (1/pre_func(xx, *popt))).T
        # w = np.r_[np.ones(64), .05*xx[64:]]
        # XX = (XX.T * w).T
    if magnitude_only:
        X = XX
    else:
        X = XX * np.exp(1j * np.array(F.dPhi))  # Relative phase STFT
    if transpose_spectrum:
        X = X.T
    if svd_only:
        # Rank-M truncated SVD factorization instead of ICA.
        u, s, v = svd(X.T)
        A = np.dot(u[:, :M], np.diag(s)[:M, :M])
        S = v[:M, :]  # v = V.H in np.linalg.svd
        AS = np.dot(A, S).T  # Non Hermitian transpose avoids complex conjugation
    else:
        A, S = cjade(X.T, M, max_iter)  # complex-domain JADE by J. F. Cardoso
        AS = np.array(A*S).T  # Non Hermitian transpose avoids complex conjugation
    if transpose_spectrum:
        AS = AS.T
    # Full mixture reconstruction from all M components.
    X_hat = np.absolute(AS)
    if pre_emphasis:
        # X_hat = (XX.T / (w)).T
        # NOTE(review): this re-applies the envelope to XX (not AS) —
        # confirm this is the intended inversion of the pre-emphasis.
        X_hat = (XX.T * pre_func(xx, *popt)).T
    Phi_hat = phs_rec(AS, F.dphi)
    x_hat_all = F.inverse(X_hat=X_hat, Phi_hat=Phi_hat, usewin=True)
    # Per-component reconstruction: resynthesize each rank-1 term A[:,k]*S[k,:].
    x_hat = []
    for k in np.arange(M):
        if svd_only:
            AS = np.dot(A[:, k][:, np.newaxis], S[k, :][np.newaxis, :]).T
        else:
            AS = np.array(A[:, k]*S[k, :]).T
        if transpose_spectrum:
            AS = AS.T
        X_hat = np.absolute(AS)
        if pre_emphasis:
            # X_hat = (XX.T / (w)).T
            X_hat = (XX.T * pre_func(xx, *popt)).T
        Phi_hat = phs_rec(AS, F.dphi)
        x_hat.append(F.inverse(X_hat=X_hat, Phi_hat=Phi_hat, usewin=True))
    return x_hat, x_hat_all
|
def extractJstranslationBlogspotCom(item):
    '''
    Parser for 'jstranslation.blogspot.com'
    '''
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    # Tag -> (release name, translation type); insertion order sets priority.
    known_tags = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in known_tags.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
|
dWright's interchange branch to create a DCP using the entry point
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp
Example:
export RAPIDWRIGHT_PATH=~/RapidWright
$RAPIDWRIGHT_PATH/scripts/invoke_rapidwright.sh \
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp \
test.netlist test.phys test.xdc test.dcp
"""
import argparse
from fpga_interchange.interchange_capnp import Interchange, write_capnp_file
from fpga_interchange.logical_netlist import Library, Cell, Direction, CellInstance, LogicalNetlist
from fpga_interchange.physical_netlist import PhysicalNetlist, PhysicalBelPin, \
Placement, PhysicalPip, PhysicalSitePin, PhysicalSitePip, \
chain_branches, chain_pips, PhysicalNetType, PhysicalCellType
def example_logical_netlist():
    """Build the example logical netlist: an input registered through an
    FDRE flip-flop (with buffered clock) and driven back out."""
    hdi_primitives = Library('hdi_primitives')

    # Primitive cell name -> ordered (port, direction) pairs.
    primitive_ports = [
        ('FDRE', [('D', Direction.Input), ('C', Direction.Input),
                  ('CE', Direction.Input), ('R', Direction.Input),
                  ('Q', Direction.Output)]),
        ('IBUF', [('I', Direction.Input), ('O', Direction.Output)]),
        ('OBUF', [('I', Direction.Input), ('O', Direction.Output)]),
        ('BUFG', [('I', Direction.Input), ('O', Direction.Output)]),
        ('VCC', [('P', Direction.Output)]),
        ('GND', [('G', Direction.Output)]),
    ]
    for cell_name, ports in primitive_ports:
        cell = Cell(cell_name)
        for port_name, direction in ports:
            cell.add_port(port_name, direction)
        hdi_primitives.add_cell(cell)

    top = Cell('top')
    for port_name, direction in [('i', Direction.Input),
                                 ('clk', Direction.Input),
                                 ('o', Direction.Output)]:
        top.add_port(port_name, direction)

    for instance_name, cell_type in [('ibuf', 'IBUF'), ('obuf', 'OBUF'),
                                     ('clk_ibuf', 'IBUF'), ('clk_buf', 'BUFG'),
                                     ('ff', 'FDRE'), ('VCC', 'VCC'),
                                     ('GND', 'GND')]:
        top.add_cell_instance(instance_name, cell_type)

    # Net name -> connection list; ('port', p) means a top-level cell port,
    # anything else is (instance, pin).  Order matches the original wiring.
    net_connections = [
        ('i', [('port', 'i'), ('ibuf', 'I')]),
        ('i_buf', [('ibuf', 'O'), ('ff', 'D')]),
        ('o_buf', [('ff', 'Q'), ('obuf', 'I')]),
        ('o', [('obuf', 'O'), ('port', 'o')]),
        ('clk', [('port', 'clk'), ('clk_ibuf', 'I')]),
        ('clk_ibuf', [('clk_ibuf', 'O'), ('clk_buf', 'I')]),
        ('clk_buf', [('clk_buf', 'O'), ('ff', 'C')]),
        ('GLOBAL_LOGIC1', [('VCC', 'P'), ('ff', 'CE')]),
        ('GLOBAL_LOGIC0', [('GND', 'G'), ('ff', 'R')]),
    ]
    for net_name, pins in net_connections:
        top.add_net(net_name)
        for instance_name, pin in pins:
            if instance_name == 'port':
                top.connect_net_to_cell_port(net_name, pin)
            else:
                top.connect_net_to_instance(net_name, instance_name, pin)

    work = Library('work')
    work.add_cell(top)

    logical_netlist = LogicalNetlist(
        name='top',
        top_instance_name='top',
        top_instance=CellInstance(
            cell_name='top',
            view='netlist',
            property_map={},
        ),
        property_map={},
        libraries={
            'work': work,
            'hdi_primitives': hdi_primitives,
        })
    return logical_netlist
def example_physical_netlist():
phys_netlist = PhysicalNetlist(part='xc7a50tfgg484-1')
ibuf_placement = Placement(
cell_type='IBUF', cell_name='ibuf', site='IOB_X0Y12', bel='INBUF_EN')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(ibuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y12', site_type='IOB33')
obuf_placement = Placement(
cell_type='OBUF', cell_name='obuf', site='IOB_X0Y11', bel='OUTBUF')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='IN', cell_pin='I')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(obuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y11', site_type='IOB33')
clk_ibuf_placement = Placement(
cell_type='IBUF',
cell_name='clk_ibuf',
site='IOB_X0Y24',
bel='INBUF_EN')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(clk_ibuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y24', site_type='IOB33')
clk_buf_placement = Placement(
cell_type='BUFG',
cell_name='clk_buf',
site='BUFGCTRL_X0Y0',
bel='BUFG')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='I0', cell_pin='I')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='O', cell_pin='O')
phys_netlist.add_placement(clk_buf_placement)
phys_netlist.add_site_instance(site_name='BUFGCTRL_X0Y0', site_type='BUFG')
ff_placement = Placement(
cell_type='FDRE', cell_name='ff', site='SLICE_X1Y12', bel='AFF')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='SR', cell_pin='R')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='D', cell_pin='D')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='Q', cell_pin='Q')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CE', cell_pin='CE')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CK', cell_pin='C')
phys_netlist.add_placement(ff_placement)
phys_netlist.add_site_instance(site_name='SLICE_X1Y12', site_type='SLICEL')
i_root = chain_branches((PhysicalBelPin('IOB_X0Y12', 'PAD', 'PAD'),
PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'PAD')))
phys_netlist.add_physical_net(net_name='i', sources=[i_root], stubs=[])
i_buf_root = chain_branches(
(PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'OUT'),
PhysicalSitePip('IOB_X0Y12', 'IUSED', '0'),
PhysicalBelPin('IOB_X0Y12', 'I', 'I'),
PhysicalSitePin('IOB_X0Y12', 'I')) +
chain_pips('LIOI3_X0Y11', ('LIOI_IBUF0', 'LIOI_I0', 'LIOI_ILOGIC0_D',
'IOI_ILOGIC0_O', 'IOI_LOGIC_OUTS18_1')) +
(PhysicalPip('IO_INT_INTERFACE_L_X0Y12',
'INT_INTERFACE_LOGIC_OUTS_L_B18',
'INT_INTERFACE_LOGIC_OUTS_L18'),
PhysicalPip('INT_L_X0Y12', 'LOGIC_OUTS_L18', 'EE2BEG0'),
Phy | sicalPip('INT_L_X2Y12', 'EE2END0', 'BYP_ALT0'),
PhysicalPip('INT_L_X2Y12', 'BYP_ALT0', 'BYP_L0'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_BYP0', 'CLBLL_L_AX'),
PhysicalSitePin('SLICE_X1Y12', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AX', 'AX'),
PhysicalSitePip('SLICE_X1Y12', 'AFFMUX', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'D')))
| phys_netlist.add_physical_net(
net_name='i_buf', sources=[i_buf_root], stubs=[])
o_buf_root = chain_branches(
(PhysicalBelPin('SLICE_X1Y12', 'AFF', 'Q'),
PhysicalBelPin('SLICE_X1Y12', 'AQ', 'AQ'),
PhysicalSitePin('SLICE_X1Y12', 'AQ'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_L_AQ', 'CLBLL_LOGIC_OUTS0'),
PhysicalPip('INT_L_X2Y12', 'LOGIC_OUTS_L0', 'SL1BEG0'),
PhysicalPip('INT_L_X2Y11', 'SL1END0', 'WW2BEG0'),
PhysicalPip('INT_L_X0Y11', 'WW2END0', 'IMUX_L34')) +
chain_pips('LIOI3_X0Y11', ('IOI_IMUX34_0', 'IOI_OLOGIC1_D1',
'LIOI_OLOGIC1_OQ', 'LIOI_O1')) +
(
PhysicalSitePin('IOB_X0Y11', 'O'),
PhysicalBelPin('IOB_X0Y11', 'O', 'O'),
PhysicalSitePip('IOB_X0Y11', 'OUSED', '0'),
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import mo | dels, migrations
class Migration(migrations.Migration):
    """Create the LastLogins model.

    NOTE(review): despite the name, the fields (name, slug) look like a
    generic slugged model — confirm the model name is intentional.
    """

    dependencies = [
        ('agentex', '0003_auto_20150622_1101'),
    ]

    operations = [
        migrations.CreateModel(
            name='LastLogins',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('slug', models.SlugField(unique=True)),
            ],
        ),
    ]
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import client as gc
from glanceclient import exc
from glanceclient.openstack.common.apiclient import exceptions
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
from heat.engine import constraints
CLIENT_NAME = 'glance'
class GlanceClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin wrapping the Glance (image service) v1 client."""

    # Exceptions from both the apiclient layer and glanceclient itself are
    # recognized by the base ClientPlugin machinery.
    exceptions_module = [exceptions, exc]

    # Single service type; IMAGE aliases the 'image' catalog entry.
    service_types = [IMAGE] = ['image']

    def _create(self):
        """Build a Glance v1 client from the request context and config."""
        con = self.context
        endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
        endpoint = self.url_for(service_type=self.IMAGE,
                                endpoint_type=endpoint_type)
        args = {
            'auth_url': con.auth_url,
            'service_type': self.IMAGE,
            'project_id': con.tenant_id,
            'token': self.auth_token,
            'endpoint_type': endpoint_type,
            'cacert': self._get_client_option(CLIENT_NAME, 'ca_file'),
            'cert_file': self._get_client_option(CLIENT_NAME, 'cert_file'),
            'key_file': self._get_client_option(CLIENT_NAME, 'key_file'),
            'insecure': self._get_client_option(CLIENT_NAME, 'insecure')
        }
        return gc.Client('1', endpoint, **args)

    def _find_with_attr(self, entity, **kwargs):
        """Find a item for entity with attributes matching ``**kwargs``.

        Raises NotFound for zero matches and NoUniqueMatch for several.
        """
        matches = list(self._findall_with_attr(entity, **kwargs))
        num_matches = len(matches)
        if num_matches == 0:
            msg = ("No %(name)s matching %(args)s.") % {
                'name': entity,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        elif num_matches > 1:
            raise exceptions.NoUniqueMatch()
        else:
            return matches[0]

    def _findall_with_attr(self, entity, **kwargs):
        """Find all items for entity with attributes matching ``**kwargs``."""
        # 'entity' names a manager attribute on the client (e.g. 'images').
        func = getattr(self.client(), entity)
        filters = {'filters': kwargs}
        return func.list(**filters)

    def is_not_found(self, ex):
        # Translate client exceptions into Heat's generic categories.
        return isinstance(ex, (exceptions.NotFound, exc.HTTPNotFound))

    def is_over_limit(self, ex):
        return isinstance(ex, exc.HTTPOverLimit)

    def is_conflict(self, ex):
        return isinstance(ex, (exceptions.Conflict, exc.Conflict))

    def find_image_by_name_or_id(self, image_identifier):
        """Return the ID for the specified image name or identifier.

        :param image_identifier: image name or a UUID-like identifier
        :returns: the id of the requested :image_identifier:
        """
        return self._find_image_id(self.context.tenant_id,
                                   image_identifier)

    @os_client.MEMOIZE_FINDER
    def _find_image_id(self, tenant_id, image_identifier):
        # tenant id in the signature is used for the memoization key,
        # that would differentiate similar resource names across tenants.
        return self.get_image(image_identifier).id

    def get_image(self, image_identifier):
        """Return the image object for the specified image name/id.

        :param image_identifier: image name
        :returns: an image object with name/id :image_identifier:
        """
        try:
            # Fast path: treat the identifier as an ID.
            return self.client().images.get(image_identifier)
        except exc.HTTPNotFound:
            # Fall back to a name lookup.
            return self._find_with_attr('images', name=image_identifier)
class ImageConstraint(constraints.BaseCustomConstraint):
    """Custom constraint validating that a glance image name/ID resolves."""

    # Raised by find_image_by_name_or_id when the image is missing or the
    # name is ambiguous; either one means the constraint fails validation.
    expected_exceptions = (exceptions.NotFound, exceptions.NoUniqueMatch)
    resource_client_name = CLIENT_NAME
    resource_getter_name = 'find_image_by_name_or_id'
|
#!/usr/bin/env python
#from distutils.core import setup
from setuptools import setup
import subprocess
import os
import platform
import re
def get_pi_version(cpuinfo_path='/proc/cpuinfo'):
    """Identify which Raspberry Pi model we are running on.

    Reads the kernel cpuinfo file and maps the board revision code to a
    known Pi model name.

    :param cpuinfo_path: location of the cpuinfo file (parameterized so the
        detection logic can be exercised against canned data; defaults to
        the live kernel file).
    :returns: the model name, "Unknown" for an unrecognized Pi revision,
        or None when the hardware is not a Raspberry Pi at all.
    """
    pi_versions = {
        "0002": "Model B Revision 1.0",
        "0003": "Model B Revision 1.0",
        "0004": "Model B Revision 2.0",
        "0005": "Model B Revision 2.0",
        "0006": "Model B Revision 2.0",
        "0007": "Model A",
        "0008": "Model A",
        "0009": "Model A",
        "000d": "Model B Revision 2.0",
        "000e": "Model B Revision 2.0",
        "000f": "Model B Revision 2.0",
        "0010": "Model B+",
        "0011": "Compute Module",
        "0012": "Model A+",
        "a01041": "Pi 2 Model B",
        "a21041": "Pi 2 Model B",
        "900092": "PiZero",
        "a02082": "Pi3 Model B",
        "a22082": "Pi3 Model B",
    }
    with open(cpuinfo_path, 'r') as cpuinfo:
        info = cpuinfo.read()
    # Raw strings: '\s'/'\w' in plain literals raise DeprecationWarning
    # (and are an error in future Python versions).
    soc = re.search(r'^Hardware\s+:\s+(\w+)$', info,
                    flags=re.MULTILINE | re.IGNORECASE)
    rev = re.search(r'^Revision\s+:\s+(\w+)$', info,
                    flags=re.MULTILINE | re.IGNORECASE)
    if not soc or not rev:  # Not a Pi
        return None
    if "BCM" not in soc.group(1):  # Not a Broadcom SoC -> not a Pi
        return None
    # "Unknown" means it is likely a Pi, but an unknown revision.
    return pi_versions.get(rev.group(1), "Unknown")
# Packages handed to setuptools' install_requires; populated below based on
# which communication backend the detected platform needs.
DEPENDS_ON = []

if __name__ == "__main__":
    # Backend used per board: Pi 3 (and unknown/unresolved boards) talk to
    # the PiSoC over I2C, all older Pis over UART.
    # NOTE: the original dict listed "Model B Revision 2.0" twice; the
    # duplicate key has been removed (identical value, no behavior change).
    backend = {
        "Model B Revision 1.0": "UART",
        "Model B Revision 2.0": "UART",
        "Model A": "UART",
        "Model B+": "UART",
        "Compute Module": "UART",
        "Model A+": "UART",
        "Pi 2 Model B": "UART",
        "PiZero": "UART",
        "Pi3 Model B": "I2C",
        "Unknown": "I2C",
        "unresolved": "I2C"
    }
    plat = None
    if platform.platform().find("Linux") >= 0:  # We are on linux... Is it a pi?
        if os.uname()[4][:3] == 'arm':  # probably a pi
            plat = get_pi_version()
            if plat is None:  # oh no! Maybe another SBC?
                plat = "unresolved"
    if plat is None:  # Likely on a PC of some sort...
        DEPENDS_ON.append("pyserial==2.7")
    elif backend[plat] == "UART":
        try:
            import serial
        except ImportError:  # only a missing module should add the dependency
            DEPENDS_ON.append("pyserial==2.6")
    elif backend[plat] == "I2C":
        try:
            import smbus
        except ImportError:
            # pypi version of smbus does not appear to work. Get the version
            # from the Raspbian package repository instead...
            if os.geteuid() != 0:
                print("Some dependencies should be installed via the Linux "
                      "package manager but could not be installed without "
                      "root. Either run this script with sudo privileges or "
                      "apt-get the following packages before using the pisoc "
                      "package:"
                      "\ni2c-tools"
                      "\npython-smbus")
            else:
                proc = subprocess.Popen(
                    'apt-get install -y i2c-tools python-smbus',
                    shell=True, stdin=None, stdout=subprocess.PIPE,
                    bufsize=1, executable="/bin/bash")
                for line in iter(proc.stdout.readline, b''):
                    print(line.rstrip())
                proc.stdout.close()
                proc.wait()

setup(name='pisoc',
      version='2.0.1',
      description='PiSoC Python API',
      author='Brian Bradley',
      license='MIT',
      install_requires=DEPENDS_ON,
      author_email='bradley@embeditelectronics.com',
      packages=['pisoc']
      )
|
import os
import subprocess
import requests
import time
from urlparse import urlparse
from config import config
class Server(object):
    """
    Simple helper to start/stop and interact with a tests server

    TODO: add method to check what request has been send last by browser
    might need this for connect-src testing. To make sure nothing is send
    over network
    """

    def __init__(self, address, port):
        """
        :param address: host/IP the test server listens on
        :param port: port the test server listens on
        """
        self.address = address
        self.port = port
        self.logfile_name = config['server_log_filename']
        self.log = None
        # Byte offset of the last read position in the server log file.
        self.log_pointer = 0

    def start(self):
        """
        Starts test server with stdout and stderr output to /dev/null
        """
        FNULL = open(os.devnull, 'w')
        command_line = ['python', 'server/server.py']
        self.process = subprocess.Popen(command_line, shell=False,
                                        stdout=FNULL, stderr=FNULL)
        self.wait_for_server_to_start()
        self.clean_server_log()

    def stop(self):
        """
        Shutdown test server child process
        """
        self.process.terminate()

    def wait_for_server_to_start(self, timeout=5):
        """
        Waits for server process to start

        Raises Exception if server didn't start within ``timeout`` seconds.
        TODO: create exceptions class and raise smth like ServerError
        """
        end_time = time.time() + timeout
        while time.time() < end_time:
            if self.server_is_running():
                return True
            else:
                print('Waiting for start...')
                time.sleep(1)
        raise Exception('Cannot start server')

    def server_is_running(self):
        """
        Checks if server is running by polling its /ping endpoint.
        """
        target_url = 'http://{0}:{1}/ping'.format(self.address, self.port)
        try:
            response = requests.get(target_url, timeout=1)
        except Exception:
            # Connection refused/timeout etc. -> server not up yet.
            return False
        if response.status_code == 200 and response.content == 'pong':
            return True
        print('Got unexpected response from server:')
        # BUG FIX: .format() used to be applied to print()'s return value
        # (None), and requests' Response has no 'status' attribute --
        # format the message first and use status_code.
        print('Status: {}\n Content: {}'.format(response.status_code,
                                                response.content))
        return False

    def clean_server_log(self):
        """Truncate the server log file."""
        # 'w' mode truncates; the context manager handles closing.
        with open(self.logfile_name, 'w') as f:
            f.write('')

    def is_request_received(self, method, url, ignore_query=False):
        """
        Method checks if request to specific url has been received by
        server.

        if ignore_query is set to True, the query string is ignored during
        comparison.
        Returns True if yes otherwise returns False
        """
        logs = self.get_new_log_messages()
        parsed_logs = self._parse_logs(logs)
        result = False
        for message in parsed_logs:
            if ignore_query:
                msg_url = urlparse(message['url'].lower()).path
            else:
                msg_url = message['url'].lower()
            if (method.lower() == message['method'].lower() and
                    url.lower() == msg_url):
                result = True
        return result

    def update_log_pointer(self):
        """
        Method to update log read position in case you want to get latest
        logs. e.g. call it before your test to get server log's for test
        """
        with open(self.logfile_name, 'r') as f:
            f.seek(0, 2)  # seek to end of file
            self.log_pointer = f.tell()

    def get_new_log_messages(self):
        """
        Method to get new log messages from server log

        'new' means since last call for update_log_pointer
        """
        with open(self.logfile_name, 'r') as f:
            f.seek(self.log_pointer)
            messages = f.readlines()
            self.log_pointer = f.tell()
        return messages

    def _parse_logs(self, logs):
        """
        Method to parse log messages

        Returns array of dict for each log message, parsed by
        _parse_log_message method
        """
        parsed_logs = []
        for log_message in logs:
            parsed_logs.append(self._parse_log_message(log_message))
        return parsed_logs

    @staticmethod
    def _parse_log_message(log_message):
        """
        Method to parse log message from server log

        returns dict {'method': 'method_from_log_message',
                      'url': 'url_from_log_message'}
        """
        # Standard access-log layout: field 5 is '"METHOD', field 6 the URL.
        url = log_message.split(' ')[6]
        method = log_message.split(' ')[5][1:]
        return {'method': method,
                'url': url}
|
from collections import namedtuple

# Field order is significant: callers consume the feature vector
# positionally in exactly this order.
ApacheFeature = namedtuple('ApacheFeature', [
    'BusyWorkers',
    'IdleWorkers',
    'waiting_for_connection',
    'starting_up',
    'reading_request',
    'sending_reply',
    'keepalive_read',
    'dns_lookup',
    'closing_connection',
    'logging',
    'graceful_finishing',
    'idle_worker_cleanup',
    'BytesPerSec',
    'BytesPerReq',
    'ReqPerSec',
    'Uptime',
    'Total_kBytes',
    'Total_Accesses'
])


def get_feature(stats):
    """Build an ApacheFeature tuple from a raw Apache status mapping.

    :param stats: mapping with one entry per ApacheFeature field name
    :returns: ApacheFeature populated from ``stats``
    """
    # Pull each value by field name so the namedtuple definition is the
    # single source of truth for which stats are extracted, and in what
    # order.
    return ApacheFeature(*(stats[field] for field in ApacheFeature._fields))
|
y(
... publish_messages, channel)
... finally:
... channel.close()
"""
channels = [channel]
class Revival(object):
__name__ = getattr(fun, '__name__', None)
__module__ = getattr(fun, '__module__', None)
__doc__ = getattr(fun, '__doc__', None)
def __init__(self, connection):
self.connection = connection
def revive(self, channel):
channels[0] = channel
def __call__(self, *args, **kwargs):
if channels[0] is None:
self.revive(self.connection.default_channel)
return fun(*args, channel=channels[0], **kwargs), channels[0]
revive = Revival(self)
return self.ensure(revive, revive, **ensure_options)
    def create_transport(self):
        """Instantiate this connection's transport, bound to this client."""
        return self.get_transport_cls()(client=self)
def get_transport_cls(self):
"""Get the currently used transport class."""
transport_cls = self.transport_cls
if not transport_cls or isinstance(transport_cls, string_t):
transport_cls = get_transport_cls(transport_cls)
return transport_cls
    def clone(self, **kwargs):
        """Create a copy of the connection with same settings.

        Keyword arguments override the copied connection parameters.
        """
        return self.__class__(**dict(self._info(resolve=False), **kwargs))
    def get_heartbeat_interval(self):
        """Return the heartbeat interval reported by the live transport."""
        return self.transport.get_heartbeat_interval(self.connection)
def _info(self, resolve=True):
transport_cls = self.transport_cls
if resolve:
transport_cls = self.resolve_aliases.get(
transport_cls, transport_cls)
D = self.transport.default_connection_params
hostname = self.hostname or D.get('hostname')
if self.uri_prefix:
hostname = '%s+%s' % (self.uri_prefix, hostname)
info = (
('hostname', hostname),
('userid', self.userid or D.get('userid')),
('password', self.password or D.get('password')),
('virtual_host', self.virtual_host or D.get('virtual_host')),
('port', self.port or D.get('port')),
('insist', self.insist),
('ssl', self.ssl),
('transport', transport_cls),
('connect_timeout', self.connect_timeout),
('transport_options', self.transport_options),
('login_method', self.login_method or D.get('login_method')),
('uri_prefix', self.uri_prefix),
('heartbeat', self.heartbeat),
('failover_strategy', self._failover_strategy),
('alternates', self.alt),
)
return info
    def info(self):
        """Get connection info.

        :returns: ``OrderedDict`` of the parameter pairs from :meth:`_info`.
        """
        return OrderedDict(self._info())
    def __eqhash__(self):
        # Hash key built from endpoint/credentials/options; connections
        # with identical parameters hash equal.
        return HashedSeq(self.transport_cls, self.hostname, self.userid,
                         self.password, self.virtual_host, self.port,
                         repr(self.transport_options))
def as_uri(self, include_password=False, mask='**',
getfields=itemgetter('port', 'userid', 'password',
'virtual_host', 'transport')):
"""Convert connection parameters to URL form."""
hostname = self.hostname or 'localhost'
if self.transport.can_parse_url:
connection_as_uri = self.hostname
if self.uri_prefix:
connection_as_uri = '%s+%s' % (self.uri_prefix, hostname)
if not include_password:
connection_as_uri = maybe_sanitize_url(connection_as_uri)
return connection_as_uri
if self.uri_prefix:
connection_as_uri = '%s+%s' % (self.uri_prefix, hostname)
if not include_password:
connection_as_uri = maybe_sanitize_url(connection_as_uri)
return connection_as_uri
fields = self.info()
port, userid, password, vhost, transport = getfields(fields)
return as_url(
transport, hostname, port, userid, password, quote(vhost),
sanitize=not include_password, mask=mask,
)
    def Pool(self, limit=None, **kwargs):
        """Pool of connections.

        See Also:
            :class:`ConnectionPool`.

        Arguments:
            limit (int): Maximum number of active connections.
                Default is no limit.
            **kwargs: Additional keyword arguments passed on to the
                :class:`ConnectionPool` constructor.

        Example:
            >>> connection = Connection('amqp://')
            >>> pool = connection.Pool(2)
            >>> c1 = pool.acquire()
            >>> c2 = pool.acquire()
            >>> c3 = pool.acquire()
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
              File "kombu/connection.py", line 354, in acquire
              raise ConnectionLimitExceeded(self.limit)
            kombu.exceptions.ConnectionLimitExceeded: 2
            >>> c1.release()
            >>> c3 = pool.acquire()
        """
        return ConnectionPool(self, limit, **kwargs)
    def ChannelPool(self, limit=None, **kwargs):
        """Pool of channels.

        See Also:
            :class:`ChannelPool`.

        Arguments:
            limit (int): Maximum number of active channels.
                Default is no limit.
            **kwargs: Additional keyword arguments passed on to the
                :class:`ChannelPool` constructor.

        Example:
            >>> connection = Connection('amqp://')
            >>> pool = connection.ChannelPool(2)
            >>> c1 = pool.acquire()
            >>> c2 = pool.acquire()
            >>> c3 = pool.acquire()
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
              File "kombu/connection.py", line 354, in acquire
              raise ChannelLimitExceeded(self.limit)
            kombu.connection.ChannelLimitExceeded: 2
            >>> c1.release()
            >>> c3 = pool.acquire()
        """
        return ChannelPool(self, limit, **kwargs)
    def Producer(self, channel=None, *args, **kwargs):
        """Create new :class:`kombu.Producer` instance."""
        # Deferred import; presumably avoids a circular import between the
        # connection and messaging modules -- confirm before hoisting.
        from .messaging import Producer
        return Producer(channel or self, *args, **kwargs)
    def Consumer(self, queues=None, channel=None, *args, **kwargs):
        """Create new :class:`kombu.Consumer` instance."""
        # Deferred import; presumably avoids a circular import between the
        # connection and messaging modules -- confirm before hoisting.
        from .messaging import Consumer
        return Consumer(channel or self, queues, *args, **kwargs)
    def SimpleQueue(self, name, no_ack=None, queue_opts=None,
                    queue_args=None,
                    exchange_opts=None, channel=None, **kwargs):
        """Simple persistent queue API.

        Create new :class:`~kombu.simple.SimpleQueue`, using a channel
        from this connection.

        If ``name`` is a string, a queue and exchange will be automatically
        created using that name as the name of the queue and exchange,
        also it will be used as the default routing key.

        Arguments:
            name (str, kombu.Queue): Name of the queue/or a queue.
            no_ack (bool): Disable acknowledgments. Default is false.
            queue_opts (Dict): Additional keyword arguments passed to the
                constructor of the automatically created :class:`~kombu.Queue`.
            queue_args (Dict): Additional keyword arguments passed to the
                constructor of the automatically created :class:`~kombu.Queue`
                for setting implementation extensions (e.g., in RabbitMQ).
            exchange_opts (Dict): Additional keyword arguments passed to the
                constructor of the automatically created
                :class:`~kombu.Exchange`.
            channel (ChannelT): Custom channel to use. If not specified the
                connection default channel is used.
            **kwargs: Additional keyword arguments passed on to the
                :class:`~kombu.simple.SimpleQueue` constructor.
        """
        from .simple import SimpleQueue
        return SimpleQueue(channel or self, name, no_ack, queue_opts,
                           queue_args,
                           exchange_opts, **kwargs)
def SimpleBuffer(self, name, no_ack=None, queue_opts=None,
queue_args=None,
exchange_opts=None, channel=None, **kwargs):
"""Simple ephemeral queue API.
Create new :class:`~kombu.simpl |
import time
import fractions
from functools import reduce
import logging
class Scheduler:
    """Runs jobs at their individual periodicities on a shared tick clock."""

    def __init__(self, jobs):
        """
        Create a new Scheduler.

        >>> s = Scheduler([Job(1, max, 100, 200)])
        >>> for jobs in s:
        ...     time.sleep(s.tick_duration)

        :param jobs: Sequence of jobs to schedule
        """
        periodicities = {job.periodicity for job in jobs}
        # The tick is the GCD of all job periods, so every period is a
        # whole number of ticks.
        # BUG FIX: fractions.gcd was deprecated in Python 3.5 and removed
        # in 3.9; use a local Euclid implementation that, like the old
        # fractions.gcd, also accepts non-integer periods.
        self.tick_duration = reduce(self._gcd, periodicities)
        self._ticks = self.find_minimum_ticks_required(self.tick_duration,
                                                       periodicities)
        self._jobs = jobs
        self._current_tick = 0
        logging.debug('Scheduler has {} ticks, each one is {} seconds'.
                      format(self._ticks, self.tick_duration))

    @staticmethod
    def _gcd(a, b):
        """Greatest common divisor (Euclid's algorithm).

        Drop-in replacement for the removed ``fractions.gcd``; unlike
        ``math.gcd`` it also works for floats.
        """
        while b:
            a, b = b, a % b
        return a

    @staticmethod
    def find_minimum_ticks_required(tick_duration, periodicities):
        """Find the minimum number of ticks required to execute all jobs
        at once."""
        ticks = 1
        for periodicity in reversed(sorted(periodicities)):
            if ticks % periodicity != 0:
                ticks *= int(periodicity / tick_duration)
        return ticks

    def __iter__(self):
        return self

    def __next__(self):
        """Advance one tick; run and return the jobs due on this tick."""
        jobs = [job for job in self._jobs
                if ((self._current_tick * self.tick_duration)
                    % job.periodicity) == 0
                ]
        if jobs:
            logging.debug('Tick {}, scheduled {}'.
                          format(self._current_tick, jobs))
        self._current_tick += 1
        # Wrap around once every job has been scheduled at least once.
        if self._current_tick >= self._ticks:
            self._current_tick = 0
        for job in jobs:
            job()
        return jobs

    def run(self):
        """Shorthand for iterating over all jobs forever.

        >>> print_time = lambda: print(time.time())
        >>> s = Scheduler([Job(1, print_time)])
        >>> s.run()
        1470146095.0748773
        1470146096.076028
        """
        for _ in self:
            time.sleep(self.tick_duration)
class Job:
    """Callable wrapper pairing a function with a run periodicity."""

    def __init__(self, periodicity, func, *func_args, **func_kwargs):
        """
        Create a new Job to be scheduled and run periodically.

        :param periodicity: Number of seconds to wait between job runs
        :param func: callable that perform the job action
        :param func_args: arguments of the callable
        :param func_kwargs: keyword arguments of the callable
        :raises ValueError: when ``func`` is not callable
        """
        if not callable(func):
            raise ValueError('func attribute must be callable')
        self.periodicity = periodicity
        self.func = func
        self.func_args = func_args
        self.func_kwargs = func_kwargs

    def __repr__(self):
        # Fall back to 'unknown' for callables without a __name__
        # (e.g. partials or instances defining __call__).
        name = getattr(self.func, '__name__', 'unknown')
        return '<Job {} every {} seconds>'.format(name, self.periodicity)

    def __call__(self, *args, **kwargs):
        self.func(*self.func_args, **self.func_kwargs)
|
# encoding: utf-8
from django.contrib import auth
from django.contrib.auth import get_user_model
from django.core.exceptions import MultipleObjectsReturned
from django import forms
from django.utils.translation import ugettext_lazy as _
__all__ = ()
class SigninForm(forms.Form):
    """Email/password sign-in form authenticating against the configured
    user model."""

    email = forms.EmailField(required=True, label=_('Email'))
    password = forms.CharField(required=True, widget=forms.PasswordInput, label=_('Password'))

    error_messages = {
        'invalid_login': _('Please enter a correct email and password. '
                           'Note that both fields may be case-sensitive.'),
        'inactive': _('This account is inactive.'),
        'removed': _('This account is removed.'),
    }

    def __init__(self, *args, **kwargs):
        # Holds the authenticated user once clean() succeeds.
        self.user_cache = None
        super(SigninForm, self).__init__(*args, **kwargs)

    def clean(self):
        cleaned = self.cleaned_data
        try:
            self.user_cache = self.check_user(**cleaned)
        except forms.ValidationError as error:
            # Surface authentication failures on the email field.
            self.add_error('email', error)
        return cleaned

    @property
    def username_field(self):
        """The model field used as the login identifier."""
        user_model = get_user_model()
        return user_model._meta.get_field(user_model.USERNAME_FIELD)

    def check_user(self, email=None, password=None, **kwargs):
        """Authenticate and vet the account.

        Returns the user, or None when several accounts share the email;
        raises ``forms.ValidationError`` for bad credentials, inactive or
        removed accounts.
        """
        credentials = {self.username_field.name: email,
                       'password': password}
        try:
            user = auth.authenticate(**credentials)
        except MultipleObjectsReturned:
            return None
        if user is None:
            raise forms.ValidationError(
                self.error_messages['invalid_login'],
                code='invalid_login',
                params={'username': self.username_field.verbose_name},
            )
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )
        if user.is_removed:
            raise forms.ValidationError(
                self.error_messages['removed'],
                code='removed',
            )
        return user

    @property
    def user(self):
        """The user authenticated by the last clean(), if any."""
        return self.user_cache
|
from encodings import normalize_encoding, aliases
from types import MappingProxyType
from psycopg2.extensions import encodings as _PG_ENCODING_MAP
# Read-only view of psycopg2's table mapping postgres encoding names to
# python codec names.
PG_ENCODING_MAP = MappingProxyType(_PG_ENCODING_MAP)

# python to postgres encoding map
# (inverse of PG_ENCODING_MAP: python codec name -> postgres encoding name)
_PYTHON_ENCODING_MAP = {
    v: k for k, v in PG_ENCODING_MAP.items()
}
def ge | t_postgres_encoding(python_encoding: str) -> str:
"""Python to postgres encoding map."""
encoding = normal | ize_encoding(python_encoding.lower())
encoding_ = aliases.aliases[encoding.replace('_', '', 1)].upper()
pg_encoding = PG_ENCODING_MAP[encoding_.replace('_', '')]
return pg_encoding
|
'Odata400Count',
'related_itemodata_navigation_link': 'Odata400IdRef',
'status': 'ResourceStatus'
}
self.attribute_map = {
'line_input_voltage_type': 'LineInputVoltageType',
'member_id': 'MemberId',
'oem': 'Oem',
'power_supply_type': 'PowerSupplyType',
'redundancy': 'Redundancy',
'redundancyodata_count': 'Redundancy@odata.count',
'redundancyodata_navigation_link': 'Redundancy@odata.navigationLink',
'related_item': 'RelatedItem',
'related_itemodata_count': 'RelatedItem@odata.count',
'related_itemodata_navigation_link': 'RelatedItem@odata.navigationLink',
'status': 'Status'
}
self._line_input_voltage_type = None
self._member_id = None
self._oem = None
self._power_supply_type = None
self._redundancy = None
self._redundancyodata_count = None
self._redundancyodata_navigation_link = None
self._related_item = None
self._related_itemodata_count = None
self._related_itemodata_navigation_link = None
self._status = None
    @property
    def line_input_voltage_type(self):
        """The line voltage type supported as an input to this Power Supply.

        :rtype: Power100LineInputVoltageType
        """
        return self._line_input_voltage_type

    @line_input_voltage_type.setter
    def line_input_voltage_type(self, line_input_voltage_type):
        """Sets the line_input_voltage_type of this Power100PowerSupply.

        :param line_input_voltage_type: the new value
        :type: Power100LineInputVoltageType
        """
        self._line_input_voltage_type = line_input_voltage_type
    @property
    def member_id(self):
        """The identifier for the member within the collection.

        :rtype: str
        """
        return self._member_id

    @member_id.setter
    def member_id(self, member_id):
        """Sets the member_id of this Power100PowerSupply.

        :param member_id: the new value
        :type: str
        """
        self._member_id = member_id
    @property
    def oem(self):
        """The manufacturer/provider specific extension moniker used to
        divide the Oem object into sections.

        :rtype: ResourceOem
        """
        return self._oem

    @oem.setter
    def oem(self, oem):
        """Sets the oem of this Power100PowerSupply.

        :param oem: the new value
        :type: ResourceOem
        """
        self._oem = oem
    @property
    def power_supply_type(self):
        """The Power Supply type (AC or DC).

        :rtype: Power100PowerSupplyType
        """
        return self._power_supply_type

    @power_supply_type.setter
    def power_supply_type(self, power_supply_type):
        """Sets the power_supply_type of this Power100PowerSupply.

        :param power_supply_type: the new value
        :type: Power100PowerSupplyType
        """
        self._power_supply_type = power_supply_type
    @property
    def redundancy(self):
        """Redundancy information; the Component ids reference the members
        of the redundancy groups.

        :rtype: list[RedundancyRedundancy]
        """
        return self._redundancy

    @redundancy.setter
    def redundancy(self, redundancy):
        """Sets the redundancy of this Power100PowerSupply.

        :param redundancy: the new value
        :type: list[RedundancyRedundancy]
        """
        self._redundancy = redundancy
    @property
    def redundancyodata_count(self):
        """The ``Redundancy@odata.count`` annotation.

        :rtype: Odata400Count
        """
        return self._redundancyodata_count

    @redundancyodata_count.setter
    def redundancyodata_count(self, redundancyodata_count):
        """Sets the redundancyodata_count of this Power100PowerSupply.

        :param redundancyodata_count: the new value
        :type: Odata400Count
        """
        self._redundancyodata_count = redundancyodata_count
    @property
    def redundancyodata_navigation_link(self):
        """The ``Redundancy@odata.navigationLink`` annotation.

        :rtype: Odata400IdRef
        """
        return self._redundancyodata_navigation_link

    @redundancyodata_navigation_link.setter
    def redundancyodata_navigation_link(self, redundancyodata_navigation_link):
        """Sets the redundancyodata_navigation_link of this Power100PowerSupply.

        :param redundancyodata_navigation_link: the new value
        :type: Odata400IdRef
        """
        self._redundancyodata_navigation_link = redundancyodata_navigation_link
    @property
    def related_item(self):
        """The ID(s) of the resources associated with this Power Limit.

        :rtype: list[Odata400IdRef]
        """
        return self._related_item

    @related_item.setter
    def related_item(self, related_item):
        """Sets the related_item of this Power100PowerSupply.

        :param related_item: the new value
        :type: list[Odata400IdRef]
        """
        self._related_item = related_item
    @property
    def related_itemodata_count(self):
        """The ``RelatedItem@odata.count`` annotation.

        :rtype: Odata400Count
        """
        return self._related_itemodata_count

    @related_itemodata_count.setter
    def related_itemodata_count(self, related_itemodata_count):
        """Sets the related_itemodata_count of this Power100PowerSupply.

        :param related_itemodata_count: the new value
        :type: Odata400Count
        """
        self._related_itemodata_count = related_itemodata_count
    @property
    def related_itemodata_navigation_link(self):
        """The ``RelatedItem@odata.navigationLink`` annotation.

        :rtype: Odata400IdRef
        """
        return self._related_itemodata_navigation_link
@related_itemodata_navigation_link.setter
def related_itemodata_navigation_link(self, related_itemodata_navigation_ |
from office365.directory.identities.identity_set import IdentitySet
from office365.entity import Entity
|
class CallRecord(Entity):
    """Represents a single peer-to-peer call or a group call between multiple participants,
    sometimes referred to as an online meeting."""

    @property
    def join_web_url(self):
        """Meeting URL associated to the call. May not be available for a
        peerToPeer call record type."""
        return self.properties.get("joinWebUrl", None)

    @property
    def organizer(self):
        """The organizing party's identity.

        Falls back to an empty :class:`IdentitySet` when the service did
        not supply one.
        """
        return self.properties.get("organizer", IdentitySet())
|
l.split(':')[-1]
else:
f.append(l.strip())
self.files_dict.append(f)
fd.close()
sysinfodir = os.path.join(os.path.dirname(files[0]), "../../sysinfo/")
sysinfodir = os.path.realpath(sysinfodir)
cpuinfo = commands.getoutput("cat %s/cpuinfo" % sysinfodir)
lscpu = commands.getoutput("cat %s/lscpu" % sysinfodir)
meminfo = commands.getoutput("cat %s/meminfo" % sysinfodir)
lspci = commands.getoutput("cat %s/lspci_-vvnn" % sysinfodir)
partitions = commands.getoutput("cat %s/partitions" % sysinfodir)
fdisk = commands.getoutput("cat %s/fdisk_-l" % sysinfodir)
status_path = os.path.join(os.path.dirname(files[0]), "../status")
status_file = open(status_path, 'r')
content = status_file.readlines()
self.testdata = re.findall("localtime=(.*)\t", content[-1])[-1]
cpunum = len(re.findall("processor\s+: \d", cpuinfo))
cpumodel = re.findall("Model name:\s+(.*)", lscpu)
socketnum = int(re.findall("Socket\(s\):\s+(\d+)", lscpu)[0])
corenum = int(re.findall("Core\(s\) per socket:\s+(\d+)", lscpu)[0]) * socketnum
threadnum = int(re.findall("Thread\(s\) per core:\s+(\d+)", lscpu)[0]) * corenum
numanodenum = int(re.findall("NUMA node\(s\):\s+(\d+)", lscpu)[0])
memnum = float(re.findall("MemTotal:\s+(\d+)", meminfo)[0]) / 1024 / 1024
nicnum = len(re.findall("\d+:\d+\.0 Ethernet", lspci))
disknum = re.findall("sd\w+\S", partitions)
fdiskinfo = re.findall("Disk\s+(/dev/sd.*\s+GiB),", fdisk)
elif sample_type == 'database':
jobid = arg
self.kvmver = get_test_keyval(jobid, "kvm-userspace-ver")
self.hostkernel = get_test_keyval(jobid, "kvm_version")
self.guestkernel = get_test_keyval(jobid, "guest-kernel-ver")
self.len = get_test_keyval(jobid, "session-length")
self.categories = get_test_keyval(jobid, "category")
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
data = exec_sql("select test_idx,iteration_key,iteration_value"
" from tko_perf_view where job_idx=%s" % idx)
testidx = None
job_dict = []
test_dict = []
for l in data:
s = l.split()
if not testidx:
testidx = s[0]
if testidx != s[0]:
job_dict.append(generate_raw_table(test_dict))
test_dict = []
testidx = s[0]
test_dict.append(' | '.join(s[1].split('--')[0:] + s[-1:]))
job_dict.append(generate_raw_table(test_dict))
self.files_dict = job_dict
self.version = " userspace: %s\n host kernel: %s\n guest kernel: %s" % (
self.kvmver, self.hostkernel, self.guestkernel)
nrepeat = len(self.files_dict)
if nrepeat < 2:
print "`nrepeat' should be larger than 1!"
sys.exit(1)
| self.desc = """<hr>Machine Info:
o CPUs(%s * %s), Cores(%s), Threads(%s), Sockets(%s),
o NumaNodes(%s), Memory(%.1fG), NICs(%s)
o Disks(%s | %s)
Please check sysinfo directory in autotest result to get | more details.
(eg: http://autotest-server.com/results/5057-autotest/host1/sysinfo/)
<hr>""" % (cpunum, cpumodel, corenum, threadnum, socketnum, numanodenum, memnum, nicnum, fdiskinfo, disknum)
self.desc += """ - Every Avg line represents the average value based on *%d* repetitions of the same test,
and the following SD line represents the Standard Deviation between the *%d* repetitions.
- The Standard deviation is displayed as a percentage of the average.
- The significance of the differences between the two averages is calculated using unpaired T-test that
takes into account the SD of the averages.
- The paired t-test is computed for the averages of same category.
""" % (nrepeat, nrepeat)
    def getAvg(self, avg_update=None):
        """Per-column averages across the repeated sample files."""
        return self._process_files(self.files_dict, self._get_list_avg,
                                   avg_update=avg_update)
    def getAvgPercent(self, avgs_dict):
        """Percentage change between paired entries of ``avgs_dict``."""
        return self._process_files(avgs_dict, self._get_augment_rate)
    def getSD(self):
        """Per-column standard deviations across the repeated sample files."""
        return self._process_files(self.files_dict, self._get_list_sd)
    def getSDRate(self, sds_dict):
        """Standard deviation expressed as a percentage of the average."""
        return self._process_files(sds_dict, self._get_rate)
    def getTtestPvalue(self, fs_dict1, fs_dict2, paired=None, ratio=None):
        """
        scipy lib is used to compute p-value of Ttest
        scipy: http://www.scipy.org/
        t-test: http://en.wikipedia.org/wiki/Student's_t-test

        :param fs_dict1: first set of sample files
        :param fs_dict2: second set of sample files
        :param paired: use the related-samples t-test (ttest_rel) instead
            of the independent-samples one (ttest_ind)
        :param ratio: with ``paired``, compare the log of the samples
        :returns: list of per-line "|"-joined strings of signed (1 - p)
            confidence values, or None when scipy/numpy is missing
        """
        try:
            from scipy import stats
            import numpy as np
        except ImportError:
            print "No python scipy/numpy library installed!"
            return None
        ret = []
        s1 = self._process_files(fs_dict1, self._get_list_self, merge=False)
        s2 = self._process_files(fs_dict2, self._get_list_self, merge=False)
        # s*[line][col] contains items (line*col) of all sample files
        for line in range(len(s1)):
            tmp = []
            if type(s1[line]) != list:
                # Non-data line (e.g. a header): pass through unchanged.
                tmp = s1[line]
            else:
                if len(s1[line][0]) < 2:
                    # A t-test needs at least two repetitions per cell.
                    continue
                for col in range(len(s1[line])):
                    avg1 = self._get_list_avg(s1[line][col])
                    avg2 = self._get_list_avg(s2[line][col])
                    sample1 = np.array(s1[line][col])
                    sample2 = np.array(s2[line][col])
                    warnings.simplefilter("ignore", RuntimeWarning)
                    if (paired):
                        if (ratio):
                            (_, p) = stats.ttest_rel(np.log(sample1), np.log(sample2))
                        else:
                            (_, p) = stats.ttest_rel(sample1, sample2)
                    else:
                        (_, p) = stats.ttest_ind(sample1, sample2)
                    # Sign encodes the direction of the change: '-' when
                    # the first average is larger.
                    flag = "+"
                    if float(avg1) > float(avg2):
                        flag = "-"
                    tmp.append(flag + "%f" % (1 - p))
                tmp = "|".join(tmp)
            ret.append(tmp)
        return ret
def _get_rate(self, data):
""" num2 / num1 * 100 """
result = "0.0"
if len(data) == 2 and float(data[0]) != 0:
result = float(data[1]) / float(data[0]) * 100
if result > 100:
result = "%.2f%%" % result
else:
result = "%.4f%%" % result
return result
def _get_augment_rate(self, data):
""" (num2 - num1) / num1 * 100 """
result = "+0.0"
if len(data) == 2 and float(data[0]) != 0:
result = (float(data[1]) - float(data[0])) / float(data[0]) * 100
if result > 100:
result = "%+.2f%%" % result
else:
result = "%+.4f%%" % result
return result
def _get_list_sd(self, data):
"""
sumX = x1 + x2 + ... + xn
avgX = sumX / n
sumSquareX = x1^2 + ... + xn^2
SD = sqrt([sumSquareX - (n * (avgX ^ 2))] / (n - 1))
"""
o_sum = sqsum = 0.0
n = len(data)
for i in data:
o_sum += float(i)
sqsum += float(i) ** 2
avg = o_sum / n
if avg == 0 or n == 1 or sqsum - (n * avg ** 2) <= 0:
return "0.0"
return "%f" % (((sqsum - (n * avg ** 2)) / (n - 1)) ** 0.5)
def _get_list_avg(self, data):
""" Compute the average of list entries """
o_sum = 0.0
for i in data:
o_sum += float(i)
return "%f" % (o_sum / len(data))
    def _get_list_self(self, data):
        """ Use this to convert sample dicts """
        # Identity transform: lets _process_files return raw samples
        # unchanged (used by getTtestPvalue to collect both sample sets).
        return data
def _process_lines(self, files_dict, row, func, avg_update, merge):
""" Use unified |
basePos, baseOrn)
localPos = [
localPos[0] - rootPosRel[0], localPos[1] - rootPosRel[1], localPos[2] - rootPosRel[2]
]
#print("localPos=",localPos)
stateVector.append(rootPosRel[1])
#self.pb2dmJoints=[0,1,2,9,10,11,3,4,5,12,13,14,6,7,8]
self.pb2dmJoints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
linkIndicesSim = []
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
linkIndicesSim.append(self.pb2dmJoints[pbJoint])
linkStatesSim = self._pybullet_client.getLinkStates(self._sim_model, linkIndicesSim, computeForwardKinematics=True, computeLinkVelocity=True)
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
#print("joint order:",j)
#ls = self._pybullet_client.getLinkState(self._sim_model, j, computeForwardKinematics=True)
ls = linkStatesSim[pbJoint]
linkPos = ls[0]
linkOrn = ls[1]
linkPosLocal, linkOrnLocal = self._pybullet_client.multiplyTransforms(
rootTransPos, rootTransOrn, linkPos, linkOrn)
if (linkOrnLocal[3] < 0):
linkOrnLocal = [-linkOrnLocal[0], -linkOrnLocal[1], -linkOrnLocal[2], -linkOrnLocal[3]]
linkPosLocal = [
linkPosLocal[0] - rootPosRel[0], linkPosLocal[1] - rootPosRel[1],
linkPosLocal[2] - rootPosRel[2]
]
for l in linkPosLocal:
stateVector.append(l)
#re-order the quaternion, DeepMimic uses w,x,y,z
if (linkOrnLocal[3] < 0):
linkOrnLocal[0] *= -1
linkOrnLocal[1] *= -1
linkOrnLocal[2] *= -1
linkOrnLocal[3] *= -1
stateVector.append(linkOrnLocal[3])
stateVector.append(linkOrnLocal[0])
stateVector.append(linkOrnLocal[1])
stateVector.append(linkOrnLocal[2])
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
#ls = self._pybullet_client.getLinkState(self._sim_model, j, computeLinkVelocity=True)
ls = linkStatesSim[pbJoint]
linkLinVel = ls[6]
linkAngVel = ls[7]
linkLinVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
linkLinVel, [0, 0, 0, 1])
#linkLinVelLocal=[linkLinVelLocal[0]-rootPosRel[0],linkLinVelLocal[1]-rootPosRel[1],linkLinVelLocal[2]-rootPosRel[2]]
linkAngVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
linkAngVel, [0, 0, 0, 1])
for l in linkLinVelLocal:
stateVector.append(l)
for l in linkAngVelLocal:
stateVector.append(l)
#print("stateVector len=",len(stateVector))
#for st in range (len(stateVector)):
# print("state[",st,"]=",stateVector[st])
return stateVector
def terminates(self):
#check if any non-allowed body part hits the ground
terminates = False
pts = self._pybullet_client.getContactPoints()
for p in pts:
part = -1
#ignore self-collision
if (p[1] == p[2]):
continue
if (p[1] == self._sim_model):
part = p[3]
if (p[2] == self._sim_model):
part = p[4]
if (part >= 0 and part in self._fall_contact_body_parts):
#print("terminating part:", part)
terminates = True
return terminates
def quatMul(self, q1, q2):
return [
q1[3] * q2[0] + q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1],
q1[3] * q2[1] + q1[1] * q2[3] + q1[2] * q2[0] - q1[0] * q2[2],
q1[3] * q2[2] + q1[2] * q2[3] + q1[0] * q2[1] - q1[1] * q2[0],
q1[3] * q2[3] - q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2]
]
def calcRootAngVelErr(self, vel0, vel1):
diff = [vel0[0] - vel1[0], vel0[1] - vel1[1], vel0[2] - vel1[2]]
return diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2]
def calcRootRotDiff(self, orn0, orn1):
orn0Conj = [-orn0[0], -orn0[1], -orn0[2], orn0[3]]
q_diff = self.quatMul(orn1, orn0Conj)
axis, angle = self._pybullet_client.getAxisAngleFromQuaternion(q_diff)
return angle * angle
def getReward(self, pose):
"""Compute and return the pose-based reward."""
#from DeepMimic double cSceneImitate::CalcRewardImitate
#todo: compensate for ground height in some parts, once we move to non-flat terrain
# not values from the paper, but from the published code.
pose_w = 0.5
vel_w = 0.05
end_eff_w = 0.15
# does not exist in paper
root_w = 0.2
if self._useComReward:
com_w = 0.1
else:
com_w = 0
total_w = pose_w + vel_w + end_eff_w + root_w + com_w
pose_w /= total_w
vel_w /= total_w
end_eff_w /= total_w
root_w /= total_w
com_w /= total_w
pose_scale = 2
vel_scale = 0.1
end_eff_scale = 40
root_scale = 5
com_scale = 10
err_scale = 1 # error scale
reward = 0
pose_err = 0
vel_err = 0
end_eff_err = 0
root_err = 0
com_err = 0
heading_err = 0
#create a mimic reward, comparing the dynamics humanoid with a kinematic one
#pose = self.InitializePoseFromMotionData()
#print("self._kin_model=",self._kin_model)
#print("kinematicHumanoid #joints=",self._pybullet_client.getNumJoints(self._kin_model))
#self.ApplyPose(pose, True, True, self._kin_model, self._pybullet_client)
#const Eigen::VectorXd& pose0 = sim_char.GetPose();
#const Eigen::VectorXd& vel0 = sim_char.GetVel();
#const Eigen::VectorXd& pose1 = kin_char.GetPose();
#const Eigen::VectorXd& vel1 = kin_char.GetVel();
#tMatrix origin_trans = sim_char.BuildOriginTrans();
#tMatrix kin_origin_trans = kin_char.BuildOriginTrans();
#
#tVector com0_world = sim_char.CalcCOM();
if self._useComReward:
comSim, comSimVel = self.computeCOMposVel(self._sim_model)
comKin, comKinVel = self.computeCOMposVel(self._kin_model)
#tVector com_vel0_world = sim_char.CalcCOMVel();
#tVector com1_world;
#tVector com_vel1_world;
#cRBDUtil::CalcCoM(joint_mat, body_defs, pose1, vel1, com1_world, com_vel1_world);
#
root_id = 0
#tVector root_pos0 = cKinTree::GetRootPos(joint_mat, pose0);
#tVector root_pos1 = cKinTree::GetRootPos(joint_mat, pose1);
#tQuaternion root_rot0 = cKinTree::GetRootRot(joint_mat, pose0);
#tQuaternion root_rot1 = cKinTree::GetRootRot(joint_mat, pose1);
#tVector root_vel0 = cKinTree::GetRootVel(joint_mat, vel0);
#tVector root_vel1 = cKinTree::GetRootVel(joint_mat, vel1);
#tVector root_ang_vel0 = cKinTree::GetRootAngVel(joint_mat, vel0);
#tVector root_ang_vel1 = cKinTree::GetRootAngVel(joint_mat, vel1);
mJointWeights = [
0.20833, 0.10416, 0.0625, 0.10416, 0.0625, 0.041666666666666671, 0.0625, 0.0416, 0.00,
0.10416, 0.0625, 0.0416, 0.0625, 0.0416, 0.0000
]
num_end_effs = 0
num_joints = 15
root_rot_w = mJointWeights[root_id]
rootPosSim, rootOrnSim = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
rootPosKin, rootOrnKin = self._pybullet_client.getBasePositionAndOrientation(self._kin_model)
linVelSim, angVelSim = self._pybullet_client.ge | tBaseVelocity(self._sim_model)
#don't read the velocities from the k | inematic model (they are zero), use the pose interpolator velocity
#see also issue https://github.com/bulletphysics/bullet3/issues/2401
linVelKin = self._poseInterpolator._baseLinVel
angVelKin = self._poseInterpolator._baseAngVel
root_rot_err = self.calcRootRotDiff(rootOrnSim, rootOrnKin)
pose_err += root_rot_w * root_rot_err
root_vel_diff = [
linVelSim[0] - linVelKin[0], linVelSim[1] - linVelKin[1], linVelSim[2] - linVelKin[2]
]
root_vel_err = root_vel_diff[0] * root_vel_diff[0] + root_vel_diff[1] * root_vel_diff[
1] + root_vel_diff[2] * root_vel_diff[2]
root_ang_vel_err = self.calcRootAngVelErr(angVelSim, angVelKin)
vel_err += root_rot_w * root_ang_vel_err
useArray = True
if useArray:
join |
import os
import sys
import yaml
from etllib.conf import Conf
from etllib.yaml_helper import YAMLHelper
from plugins import PluginEngine
class RulesEngine(list):
    """Load ETL rule definitions (*.yml) from this directory and apply them.

    The engine is itself a list of parsed rule dicts.  A rule is either
    'single' (one ingress -> processors -> egress pipeline) or 'group'
    (an ordered collection of single rules).

    Fixed: the original used Python-2-only constructs (`print` statement,
    `dict.iteritems()`); the replacements behave identically on Python 2
    while also working on Python 3.
    """

    def __init__(self):
        self.rules_path = os.path.dirname(os.path.realpath(__file__))
        self.conf = Conf()
        self.load()
        self.filter_recursion()
        self.pe = PluginEngine()

    def parse_rule_file(self, file_path):
        """Parse one YAML rule file into a rule dict."""
        yaml_data = YAMLHelper(file_path).read()
        yaml_data['rule_name'] = os.path.split(file_path)[1]
        if yaml_data['rule_type'] == 'group':
            # Group Rule, i.e. with child rules: nothing to resolve here.
            pass
        else:
            # Single Rule: resolve data-node names against the config file.
            src = yaml_data['source_node']
            dst = yaml_data['destination_node']
            yaml_data['source_node'] = self.conf.get_data_nodes(src)
            yaml_data['destination_node'] = self.conf.get_data_nodes(dst)
        return yaml_data

    def load(self):
        """Parse every .yml file in the rules directory into self."""
        rule_files = [os.path.join(self.rules_path, f)
                      for f in os.listdir(self.rules_path)
                      if os.path.isfile(os.path.join(self.rules_path, f))
                      and f.endswith('.yml')
                      ]
        for rule_file in rule_files:
            self.append(self.parse_rule_file(rule_file))

    def filter_recursion(self):
        # Drop group members that are themselves groups, so applying a
        # group can never recurse into another group.
        for rule in self:
            if rule['rule_type'] == 'group':
                rule_members = [
                    child for child in rule['members']
                    if self.get_rule_by_name(child)['rule_type'] == 'single'
                ]
                rule['members'] = rule_members

    def get_rule_by_name(self, rule_name):
        """Return the rule dict with the given name, or None if absent."""
        for rule in self:
            if rule['rule_name'] == rule_name:
                return rule

    def expand_action(self, action):
        """Recursively resolve '$rule:<name>:<field>' references in an action.

        Strings that reference another rule are replaced by that field of the
        referenced rule's ingress data; dict values are expanded in place;
        everything else is returned unchanged.
        """
        if isinstance(action, str):
            if action.startswith('$rule:'):
                _, subrule_name, subrule_field = action.strip().split(':')
                subrule = self.get_rule_by_name(subrule_name)
                return self.apply_rule_ingress(subrule)[subrule_field]
            else:
                return action
        elif isinstance(action, dict):
            # .items() works on Python 2 and 3 (the original .iteritems()
            # raised AttributeError under Python 3).  Only values are
            # replaced, so mutating during iteration is safe.
            for key, val in action.items():
                action[key] = self.expand_action(val)
            return action
        else:
            return action

    def apply_rule_ingress(self, rule):
        """Run the rule's ingress plugin and return the data it produced."""
        ingress_plugin_name = rule['ingress_plugin']
        ingress_plugin_runnable = self.pe[ingress_plugin_name].init(rule)
        data = ingress_plugin_runnable.run(rule, None)
        ingress_plugin_runnable.exit()
        return data

    def apply_rule_egress(self, rule, data):
        """Hand the processed data to the rule's egress plugin."""
        egress_plugin_name = rule['egress_plugin']
        egress_plugin_runnable = self.pe[egress_plugin_name].init(rule)
        egress_plugin_runnable.run(rule, data)
        egress_plugin_runnable.exit()

    def apply_data_processors(self, rule, data):
        """Pipe data through the rule's processor plugins, in order."""
        if not rule.get('data_processors', False):
            return data
        # A single processor may be given as a bare string.
        if isinstance(rule['data_processors'], str):
            data_processors = [rule['data_processors']]
        else:
            data_processors = rule['data_processors']
        for processor_plugin_name in data_processors:
            processor_plugin_runnable = self.pe[processor_plugin_name].init(rule)
            data = processor_plugin_runnable.run(rule, data)
            processor_plugin_runnable.exit()
        return data

    def apply_rule(self, rule):
        """Apply one rule: a pipeline for 'single', children for 'group'."""
        # print(...) with one argument is valid on Python 2 and 3; the
        # original `print '...'` statement is a SyntaxError under Python 3.
        print('Applying {0}'.format(rule['rule_name']))
        if rule['rule_type'] == 'single':
            rule['action'] = self.expand_action(rule['action'])
            data = self.apply_rule_ingress(rule)
            data = self.apply_data_processors(rule, data)
            self.apply_rule_egress(rule, data)
        else:
            for child_rule_name in rule['members']:
                self.apply_rule_by_name(child_rule_name)

    def apply_rule_by_name(self, rule_name):
        """Apply the named rule; exit the process if it does not exist."""
        for rule in self:
            if rule['rule_name'] == rule_name:
                self.apply_rule(rule)
                break
        else:
            sys.exit('Error! Rule not found')

    def apply_rules(self):
        """Apply every rule marked active."""
        for rule in self:
            if rule['active']:
                self.apply_rule(rule)
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't | need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in | Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
from typing import NamedTuple
class version_info(NamedTuple):
major: int
minor: int
micro: int
releaselevel: str
serial: int
@property
def __version__(self):
return "{}.{}.{}".format(self.major, self.minor, self.micro) + (
"{}{}{}".format(
"r" if self.releaselevel[0] == "c" else "",
self.releaselevel[0],
self.serial,
),
"",
)[self.releaselevel == "final"]
def __str__(self):
return "{} {} / {}".format(__name__, self.__version__, __version_time__)
def __repr__(self):
return "{}.{}({})".format(
__name__,
type(self).__name__,
", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
)
# Concrete version of this pyparsing release.
__version_info__ = version_info(3, 0, 6, "final", 0)
__version_time__ = "12 Nov 2021 16:06 UTC"
__version__ = __version_info__.__version__
# Pre-PEP8 synonym kept for backward compatibility.
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# define backward compat synonyms
# (guarded so they never clobber a name that an import above already set)
if "pyparsing_unicode" not in globals():
    pyparsing_unicode = unicode
if "pyparsing_common" not in globals():
    pyparsing_common = common
if "pyparsing_test" not in globals():
    pyparsing_test = testing
# Combined list of builtin expressions exposed by core, common and helpers.
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
__all__ = [
"__version__",
"__version_time__",
"__author__",
"__compat__",
"__diag__",
"And",
"AtLineStart",
"AtStringStart",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"IndentedBlock",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"Located",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"OpAssoc",
"Opt",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"PositionToken",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"any_close_tag",
"any_open_tag",
"c_style_comment",
"col",
"common_html_entity",
"counted_array",
"cpp_style_comment",
"dbl_quoted_string",
"dbl_slash_comment",
"delimited_list",
"dict_of",
"empty",
"hexnums",
"html_comment",
"identchars",
"identbodychars",
"java_style_comment",
"line",
"line_end",
"line_start",
"lineno",
"make_html_tags",
"make_xml_tags",
"match_only_at_col",
"match_previous_expr",
"match_previous_literal",
"nested_expr",
"null_debug_action",
"nums",
"one_of",
"printables",
"punc8bit",
"python_style_comment",
"quoted_string",
"remove_quotes",
"replace_with",
"replace_html_entity",
"rest_of_line",
"sgl_quoted_string",
"srange",
"string_end",
"string_start",
"trace_parse_action",
"unicode_string",
"with_attribute",
"indentedBlock",
"original_text_for",
"ungroup",
"infix_notation",
"locatedExpr",
"with_class",
"CloseMatch",
"token_map",
"pyparsing_common",
"pyparsing_unicode",
"unicode_set",
"condition_as_parse_action",
"pyparsing_test",
# pre-PEP8 compatibility names
"__versionTime__",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"htmlComment",
"javaStyleComment",
"lineEnd",
"lineStart",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"oneOf",
"opAssoc",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"withAttribute",
"indentedBlock",
"originalTextFor",
|
#
# Copyright 2011-2019 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""Import objects by name"""
import importlib
import inspect
import warnings
def import_object(path):
    """Import an object given its fully qualified name.

    A bare module name is imported directly.  Otherwise the final dotted
    component is first looked up as an attribute of its parent module and,
    failing that, imported as a submodule.
    """
    parts = path.split('.')
    if len(parts) == 1:
        return importlib.import_module(path)
    # Resolve the parent module, then try the last component as an attribute.
    attr_name = parts[-1]
    parent = importlib.import_module('.'.join(parts[:-1]))
    try:
        return getattr(parent, attr_name)
    except AttributeError:
        pass
    # Fall back: the final component may itself be an importable module.
    return importlib.import_module(path)
def fully_qualified_name(obj, sep='.'):
    """Deprecated alias for numina.util.fqn.fully_qualified_name."""
    warnings.warn(
        "use numina.util.fqn.fully_qualified_name instead",
        DeprecationWarning, stacklevel=2
    )
    from numina.util import fqn
    return fqn.fully_qualified_name(obj, sep)
|
# Python3
import numpy as np
import math
def nCr(n, r):
    """Binomial coefficient "n choose r", exact integer arithmetic."""
    fact = math.factorial
    return fact(n) // (fact(r) * fact(n - r))
def long_to_int64_array(val, ln):
    """Split a non-negative integer into little-endian 64-bit chunks.

    val : arbitrary-precision non-negative integer
    ln  : bit length of val (fixes the number of chunks to ln // 64 + 1)

    Returns a numpy uint64 array with ar[0] the least-significant 64 bits.
    """
    # Bug fix: `/` produced a float under Python 3 and np.zeros rejects
    # a float size; use integer division.
    sz = ln // 64 + 1
    # uint64 (not the signed default) so that chunks >= 2**63 don't
    # raise OverflowError on assignment.
    ar = np.zeros(sz, dtype=np.uint64)
    mask = 2**64 - 1
    for ii in range(sz):
        ar[ii] = val & mask
        val = val >> 64
    return ar
def int64_array_ro_long(ar):
    """Inverse of long_to_int64_array: reassemble 64-bit chunks.

    ar : array of 64-bit chunks, least-significant first (ar[0] = low bits).
    Returns the reconstructed arbitrary-precision integer.
    """
    # Bug fixes: `long` does not exist in Python 3 (the file is marked
    # Python 3), and left-shifting a numpy scalar by 64 overflows — so
    # accumulate in a plain Python int via int(chunk).  The debug prints
    # of the running value were removed.
    val = 0
    for chunk in ar[::-1]:  # most-significant chunk first
        val = (val << 64) | int(chunk)
    return val
def count(a_l, a, inverse=False):
    """Return the number of elements of `a_l` equal to the target value `a`
    (or, with inverse=True, the number differing from it).

    Works for input of any dimensionality.
    """
    arr = np.asarray(a_l)
    if inverse is False:
        matched = np.where(arr == a)
    else:
        matched = np.where(arr != a)
    # Bug fix: the original returned len() of the np.where tuple, which is
    # the number of array dimensions, not the number of matches.  The
    # first index array has one entry per matching element.
    return len(matched[0])
def autocorr(x):
    """Autocorrelation of x via np.correlate, keeping non-negative lags only."""
    full = np.correlate(x, x, mode='full')
    mid = full.size // 2
    return full[mid:]
def autocorr_kmlee_k(y, k):
    """Autocorrelation of y at lag k: unscaled cov(y_t, y_{t-k}) / var(y)."""
    n = len(y)
    mean = np.mean(y)
    deviations = np.asarray(y) - mean
    # Unscaled covariance between the series and its k-shifted copy:
    # one product per pair (y[i], y[i-k]) for i in k..n-1.
    products = deviations[k:] * deviations[:n - k]
    # Unscaled variance of the whole series.
    variance = np.dot(deviations, deviations)
    return np.sum(products) / variance
def autocorr_kmlee(y, p=None):
    """ACF of y at lags 0..p-1 using the explicit textbook formula.

    p defaults to len(y).  Returns a float array of length p where
    ta[k] = autocorr_kmlee_k(y, k).
    """
    if p is None:
        p = len(y)
    ta = np.zeros(p)
    # (Removed dead code: N = len(y) and ybar = np.mean(y) were computed
    # here but never used — autocorr_kmlee_k derives them itself.)
    for lag in range(p):
        ta[lag] = autocorr_kmlee_k(y, lag)
    return ta
def autocorrelate(x, method='numpy'):
    """Autocorrelation of x computed by one of several methods.

    method:
        'numpy'       - np.correlate, non-negative lags only (autocorr)
        'zeropadding' - full np.correlate output, all lags
        'kmlee'       - explicit textbook ACF (autocorr_kmlee), the
                        Python port of the MATLAB acf_k routine
    An unrecognised method name returns None.
    """
    if method == 'numpy':
        return autocorr(x)
    if method == 'zeropadding':
        return np.correlate(x, x, mode='full')
    if method == 'kmlee':
        return autocorr_kmlee(x)
def autocorrelate_m(X_org, method):
    """Row-wise autocorrelation of a 2-D array.

    X_org  : 2-D array; each row is autocorrelated independently
    method : 'numpy', 'zeropadding' or 'kmlee' (see autocorrelate)

    Returns the stacked per-row results as one array.
    """
    rows = [autocorrelate(X_org[idx, :], method=method)
            for idx in range(X_org.shape[0])]
    return np.array(rows)
|
# -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import json # noqa: F401
import time
import requests
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from biokbase.workspace.client import Workspace as workspaceService
from eapearson_TestRichReports.eapearson_TestRichReportsImpl import eapearson_TestRichReports
from eapearson_TestRichReports.eapearson_TestRichReportsServer import MethodContext
class eapearson_TestRichReportsTest(unittest.TestCase):
    """Integration tests for the eapearson_TestRichReports KBase module.

    Talks to live KBase services (auth + workspace), so KB_AUTH_TOKEN and
    KB_DEPLOYMENT_CONFIG must be set in the environment.
    """

    @classmethod
    def setUpClass(cls):
        # Resolve the user id for the supplied auth token via the KBase
        # auth service.
        token = environ.get('KB_AUTH_TOKEN', None)
        user_id = requests.post(
            'https://kbase.us/services/authorization/Sessions/Login',
            data='token={}&fields=user_id'.format(token)).json()['user_id']
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'user_id': user_id,
                        'provenance': [
                            {'service': 'eapearson_TestRichReports',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        # Load this service's section from the deployment config file.
        config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('eapearson_TestRichReports'):
            cls.cfg[nameval[0]] = nameval[1]
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = workspaceService(cls.wsURL, token=token)
        cls.serviceImpl = eapearson_TestRichReports(cls.cfg)

    @classmethod
    def tearDownClass(cls):
        # Only delete the workspace if some test actually created one.
        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')

    def getWsClient(self):
        return self.__class__.wsClient

    def getWsName(self):
        # Lazily create a uniquely named test workspace, shared by all tests.
        if hasattr(self.__class__, 'wsName'):
            return self.__class__.wsName
        suffix = int(time.time() * 1000)
        wsName = "test_eapearson_TestRichReports_" + str(suffix)
        ret = self.getWsClient().create_workspace({'workspace': wsName})  # noqa
        self.__class__.wsName = wsName
        return wsName

    def getImpl(self):
        return self.__class__.serviceImpl

    def getContext(self):
        return self.__class__.ctx

    # NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
    def test_filter_contigs_ok(self):
        # Save a 3-contig ContigSet, filter at min_length 10, and verify
        # that exactly the one short contig (length 5) is removed.
        obj_name = "contigset.1"
        contig1 = {'id': '1', 'length': 10, 'md5': 'md5', 'sequence': 'agcttttcat'}
        contig2 = {'id': '2', 'length': 5, 'md5': 'md5', 'sequence': 'agctt'}
        contig3 = {'id': '3', 'length': 12, 'md5': 'md5', 'sequence': 'agcttttcatgg'}
        obj1 = {'contigs': [contig1, contig2, contig3], 'id': 'id', 'md5': 'md5', 'name': 'name',
                'source': 'source', 'source_id': 'source_id', 'type': 'type'}
        self.getWsClient().save_objects({'workspace': self.getWsName(), 'objects':
                                        [{'type': 'KBaseGenomes.ContigSet', 'name': obj_name, 'data': obj1}]})
        ret = self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
                                            'contigset_id': obj_name, 'min_length': '10'})
        obj2 = self.getWsClient().get_objects([{'ref': self.getWsName()+'/'+obj_name}])[0]['data']
        self.assertEqual(len(obj2['contigs']), 2)
        self.assertTrue(len(obj2['contigs'][0]['sequence']) >= 10)
        self.assertTrue(len(obj2['contigs'][1]['sequence']) >= 10)
        self.assertEqual(ret[0]['n_initial_contigs'], 3)
        self.assertEqual(ret[0]['n_contigs_removed'], 1)
        self.assertEqual(ret[0]['n_contigs_remaining'], 2)

    def test_filter_contigs_err1(self):
        # Unknown contigset id -> load error from the implementation.
        with self.assertRaises(ValueError) as context:
            self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
                                          'contigset_id': 'fake', 'min_length': 10})
        self.assertTrue('Error loading original ContigSet object' in str(context.exception))

    def test_filter_contigs_err2(self):
        # Negative min_length is rejected before any workspace access.
        with self.assertRaises(ValueError) as context:
            self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
                                          'contigset_id': 'fake', 'min_length': '-10'})
        self.assertTrue('min_length parameter shouldn\'t be negative' in str(context.exception))

    def test_filter_contigs_err3(self):
        # Non-numeric min_length is rejected with a parse error.
        with self.assertRaises(ValueError) as context:
            self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
                                          'contigset_id': 'fake', 'min_length': 'ten'})
        self.assertTrue('Cannot parse integer from min_length parameter' in str(context.exception))
|
# This script is used to parse BOOST special function test data into something
# we can easily import in numpy.
import re
import os
# Where to put the data (directory will be created)
DATA_DIR = 'scipy/special/tests/data/boost'
# Where to pull out boost data
BOOST_SRC = "boostmath/test"
# C++ line comment (indented) inside a data table.
CXX_COMMENT = re.compile(r'^\s+//')
# Start of a data row: optional braces/slashes followed by the SC_ macro.
DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_')
# One numeric literal (integer or float, optional exponent).
ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?')
# Table header; captures (row width, row count, table name).
HEADER_REGEX = re.compile(
    r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)')
# Filenames (matched with re.match, i.e. anchored at the start) to skip.
IGNORE_PATTERNS = [
    # Makes use of ldexp and casts
    "hypergeometric_1F1_big_double_limited.ipp",
    "hypergeometric_1F1_big_unsolved.ipp",
    # Makes use of numeric_limits and ternary operator
    "beta_small_data.ipp",
    # Doesn't contain any data
    "almost_equal.ipp",
    # Derivatives functions don't exist
    "bessel_y01_prime_data.ipp",
    "bessel_yn_prime_data.ipp",
    "sph_bessel_prime_data.ipp",
    "sph_neumann_prime_data.ipp",
    # Data files not needed by scipy special tests.
    "ibeta_derivative_",
    r"ellint_d2?_",
    "jacobi_",
    "heuman_lambda_",
    "hypergeometric_",
    "nct_",
    r".*gammap1m1_",
    "trig_",
    "powm1_data.ipp",
]
def _raw_data(line):
    """Extract the numeric literals (as strings) from one data line."""
    values = []
    for chunk in line.split(','):
        match = ITEM_REGEX.search(chunk)
        if match:
            values.append(match.group(0))
    return values
def parse_ipp_file(filename):
    """Parse one boost .ipp test-data file.

    Returns a dict mapping table name -> list of rows, where each row is
    the list of numeric literals (as strings) on one data line.  Raises
    ValueError if a table has fewer rows than its header declares.
    """
    print(filename)
    with open(filename, 'r') as a:
        lines = a.readlines()
    data = {}
    i = 0
    while (i < len(lines)):
        line = lines[i]
        m = HEADER_REGEX.search(line)
        if m:
            # d = declared row width, n = declared number of rows.
            d = int(m.group(1))
            n = int(m.group(2))
            print(f"d = {d}, n = {n}")
            cdata = []
            i += 1
            line = lines[i]
            # Skip comments
            while CXX_COMMENT.match(line):
                i += 1
                line = lines[i]
            # Collect consecutive data rows; comments may be interleaved.
            while DATA_REGEX.match(line):
                cdata.append(_raw_data(line))
                i += 1
                line = lines[i]
                # Skip comments
                while CXX_COMMENT.match(line):
                    i += 1
                    line = lines[i]
            # Sanity check the parsed row count against the header.
            if not len(cdata) == n:
                raise ValueError(f"parsed data: {len(cdata)}, expected {n}")
            data[m.group(3)] = cdata
        else:
            i += 1
    return data
def dump_dataset(filename, data):
fid = open(filename, 'w')
try:
for line in data:
fid.write("%s\n" % " ".join(line))
finally:
fid.close()
def dump_datasets(filename):
| base, ext = os.path.splitext(os.path.basename(filename))
base += '_%s' % ext[1:]
datadir = os.path.join(DATA_DIR, base)
os.makedirs(datadir)
datasets = parse_ipp_file(filename)
for k, d in datasets.items():
print(k, len(d))
dfilename = os.path.join(datadir, k) + '.txt'
dump_dataset(dfilename, d)
if __name__ == '__main__':
    # Convert every non-ignored boost .ipp data file under BOOST_SRC.
    for filename in sorted(os.listdir(BOOST_SRC)):
        # Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp)
        if filename.endswith(".ipp"):
            if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS):
                continue
            path = os.path.join(BOOST_SRC, filename)
            print(f"================= {path} ===============")
            dump_datasets(path)
|
import unittest
import graph
class BreadthFirstSearchTest(unittest.TestCase):
    """BFS tests over sample graph files (tinyG, mediumG, tinyDG, tinyDAG)."""

    # Toggle for expensive test cases; currently unused.
    __runSlowTests = False

    def testTinyGraph(self):
        # Undirected graph: the component of vertex 0 has 7 vertices;
        # 7, 8, 9 and 12 belong to other components.
        g = graph.Graph.from_file('tinyG.txt')
        bfs = graph.BreadthFirstSearch(g, 0)
        self.assertEqual(7, bfs.count())
        self.assertFalse(bfs.connected(7))
        self.assertIsNone(bfs.path_to(7))
        self.assertFalse(bfs.connected(8))
        self.assertIsNone(bfs.path_to(8))
        self.assertFalse(bfs.connected(9))
        self.assertIsNone(bfs.path_to(9))
        self.assertFalse(bfs.connected(12))
        self.assertIsNone(bfs.path_to(12))
        # BFS paths are shortest paths; path_to lists target first.
        self.assertEqual([2, 0], bfs.path_to(2))
        self.assertEqual(1, bfs.distance(2))
        self.assertEqual([3, 5, 0], bfs.path_to(3))
        self.assertEqual(2, bfs.distance(3))
        self.assertEqual([4, 5, 0], bfs.path_to(4))
        self.assertEqual(2, bfs.distance(4))
        self.assertEqual([5, 0], bfs.path_to(5))
        self.assertEqual(1, bfs.distance(5))

    def testMedGraph(self):
        # Larger undirected graph: spot-check one shortest path.
        g = graph.Graph.from_file('mediumG.txt')
        bfs = graph.BreadthFirstSearch(g, 0)
        self.assertEqual(250, bfs.count())
        self.assertTrue(bfs.connected(123))
        self.assertEqual(9, bfs.distance(123))
        self.assertEqual([123, 246, 244, 207, 122, 92, 171, 165, 68, 0], bfs.path_to(123))

    def testTinyDG(self):
        # Directed graph: reachability is direction-sensitive.
        g = graph.Graph.from_file('tinyDG.txt', directed=True)
        bfs = graph.BreadthFirstSearch(g, 0)
        self.assertEqual(6, bfs.count())
        self.assertTrue(bfs.connected(4))
        self.assertIsNotNone(bfs.path_to(4))
        self.assertFalse(bfs.connected(7))
        self.assertIsNone(bfs.path_to(7))
        self.assertEqual([2, 4, 5, 0], bfs.path_to(2))
        self.assertEqual(3, bfs.distance(2))

    def testTinyDAG(self):
        # Directed acyclic graph rooted at 0.
        g = graph.Graph.from_file('tinyDAG.txt', directed=True)
        bfs = graph.BreadthFirstSearch(g, 0)
        self.assertEqual(9, bfs.count())
        self.assertTrue(bfs.connected(4))
        self.assertIsNotNone(bfs.path_to(4))
        self.assertFalse(bfs.connected(7))
        self.assertIsNone(bfs.path_to(7))
        self.assertEqual([12, 9, 6, 0], bfs.path_to(12))
        self.assertEqual(3, bfs.distance(12))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
from __future__ import absolute_import
import click
from datasciencebox.cli.main import cli, default_options
@cli.group(short_help='Install packages, applications and more')
@click.pass_context
def install(ctx):
    # Group container only; the subcommands below do the actual work.
    pass
@install.command('miniconda', short_help='Install miniconda in the instances')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@click.option('--target', '-t', required=False, help='Wildcard matching salt minions')
@default_options
@click.pass_context
def install_miniconda(ctx, ssh, target):
    # Apply the 'miniconda' salt state on the targeted minions.
    project = ctx.obj['project']
    out = project.salt('state.sls', args=['miniconda'], target=target, ssh=ssh)
    click.echo(out)
    # saltutil.sync_all is only run in master mode (not over salt-ssh).
    if not ssh:
        out = project.salt('saltutil.sync_all', target=target)
        click.echo(out)
@install.command('salt', short_help='Install salt master and minion(s) via salt-ssh')
@default_options
@click.pass_context
def install_salt(ctx):
    # Bootstrap the salt master + minions over SSH, then push the formulas.
    project = ctx.obj['project']
    click.echo('Installing salt (master mode)')
    out = project.salt('state.sls', args=['salt.cluster'], target='*', ssh=True)
    click.echo(out)
    click.echo('Syncing formulas')
    # NOTE(review): function-level import — presumably to avoid a circular
    # import at module load time; confirm before moving it to the top.
    from datasciencebox.cli.base import sync
    ctx.invoke(sync)
@install.command('pkg', short_help='Install a package using system package manager')
@click.argument('pkg', required=True)
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@click.option('--target', '-t', required=False, help='Wildcard matching salt minions')
@default_options
@click.pass_context
def install_pkg(ctx, pkg, ssh, target):
    # Install a single system package on the targeted minions via salt.
    project = ctx.obj['project']
    args = [pkg]
    out = project.salt('pkg.install', args=args, target=target, ssh=ssh)
    click.echo(out)
@install.command('conda', short_help='Install conda package')
@click.argument('pkg', required=True)
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@click.option('--target', '-t', required=False, help='Wildcard matching salt minions')
@default_options
@click.pass_context
def install_conda(ctx, pkg, ssh, target):
    # Install a conda package under the configured project user.
    project = ctx.obj['project']
    out = project.salt('conda.install',
                       args=[pkg],
                       kwargs={'user': project.settings['USERNAME']},
                       target=target,
                       ssh=ssh)
    click.echo(out)
@install.command('cloudera-manager', short_help='Install Cloudera Manager in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_cloudera_manager(ctx, ssh):
    # Bug fix: this function was named `install_hdfs`, colliding with the
    # real install_hdfs defined later in this module, which silently
    # shadowed this definition at module level. The click command name
    # ('cloudera-manager') is unchanged, so CLI behavior is identical.
    project = ctx.obj['project']
    click.echo('Step 1/1: Cloudera Manager')
    out = project.salt('state.sls', args=['cdh5.manager.cluster'], target='*', ssh=ssh)
    click.echo(out)
@install.command('notebook', short_help='Install Jupyter notebook in the head node')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_notebook(ctx, ssh):
    # Two salt states on the head node: conda first, then the notebook.
    proj = ctx.obj['project']
    click.echo('Step 1/2: Conda (head only)')
    click.echo(proj.salt('state.sls', args=['miniconda'], target='head', ssh=ssh))
    if not ssh:
        # Master mode: make freshly-installed modules visible to the minion.
        click.echo(proj.salt('saltutil.sync_all', target='head'))
    click.echo('Step 2/2: Jupyter Notebook')
    click.echo(proj.salt('state.sls', args=['ipython.notebook'], target='head', ssh=ssh))
@install.command('hdfs', short_help='Install hdfs in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_hdfs(ctx, ssh):
    # Apply the CDH5 HDFS cluster state to every node.
    proj = ctx.obj['project']
    click.echo('Step 1/1: HDFS')
    result = proj.salt('state.sls', args=['cdh5.hdfs.cluster'], target='*', ssh=ssh)
    click.echo(result)
@install.command('mesos', short_help='Install mesos in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_mesos(ctx, ssh):
    # Zookeeper goes on the head node before Mesos rolls out cluster-wide.
    proj = ctx.obj['project']
    steps = (
        ('Step 1/2: Zookeeper', 'cdh5.zookeeper.cluster', 'head'),
        ('Step 2/2: Mesos', 'mesos.cluster', '*'),
    )
    for banner, state, tgt in steps:
        click.echo(banner)
        click.echo(proj.salt('state.sls', args=[state], target=tgt, ssh=ssh))
@install.command('marathon', short_help='Install marathon in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_marathon(ctx, ssh):
    # Bug fix: short_help said "Install mesos" (copy-paste from the mesos
    # command); it now describes marathon.
    # Marathon requires Zookeeper (head) and Mesos (all nodes) first.
    project = ctx.obj['project']
    click.echo('Step 1/3: Zookeeper')
    out = project.salt('state.sls', args=['cdh5.zookeeper.cluster'], target='head', ssh=ssh)
    click.echo(out)
    click.echo('Step 2/3: Mesos')
    out = project.salt('state.sls', args=['mesos.cluster'], target='*', ssh=ssh)
    click.echo(out)
    click.echo('Step 3/3: Marathon')
    out = project.salt('state.sls', args=['mesos.marathon'], target='head', ssh=ssh)
    click.echo(out)
@install.command('spark', short_help='Install spark (on Mesos)')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_spark(ctx, ssh):
    # Spark on Mesos needs Zookeeper, HDFS and Mesos in place first.
    proj = ctx.obj['project']
    steps = (
        ('Step 1/4: Zookeeper', 'cdh5.zookeeper.cluster', 'head'),
        ('Step 2/4: HDFS', 'cdh5.hdfs.cluster', '*'),
        ('Step 3/4: Mesos', 'mesos.cluster', '*'),
        ('Step 4/4: Spark on Mesos', 'mesos.spark', 'head'),
    )
    for banner, state, tgt in steps:
        click.echo(banner)
        click.echo(proj.salt('state.sls', args=[state], target=tgt, ssh=ssh))
@install.command('impala', short_help='Install Impala')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_impala(ctx, ssh):
    # Impala depends on Zookeeper, HDFS and the Hive metastore.
    proj = ctx.obj['project']
    steps = (
        ('Step 1/4: Zookeeper', 'cdh5.zookeeper.cluster', 'head'),
        ('Step 2/4: HDFS', 'cdh5.hdfs.cluster', '*'),
        ('Step 3/4: Hive Metastore', 'cdh5.hive.metastore', 'head'),
        ('Step 4/4: Impala', 'cdh5.impala.cluster', '*'),
    )
    for banner, state, tgt in steps:
        click.echo(banner)
        click.echo(proj.salt('state.sls', args=[state], target=tgt, ssh=ssh))
|
# Copyright 2013 | IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not | use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from volt.openstack.common.gettextutils import _
# oslo.config options for serving over TLS; all default to None (disabled).
ssl_opts = [
    cfg.StrOpt('ca_file',
               default=None,
               help="CA certificate file to use to verify "
               "connecting clients."),
    cfg.StrOpt('cert_file',
               default=None,
               help="Certificate file to use when starting "
               "the server securely."),
    cfg.StrOpt('key_file',
               default=None,
               help="Private key file to use when starting "
               "the server securely."),
]
CONF = cfg.CONF
# Options are read back below as CONF.ssl.<name>.
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
    """Validate the configured [ssl] files and report whether SSL is on.

    Returns a truthy value when cert_file or key_file is set.  Raises
    RuntimeError when a configured file is missing on disk, or when only
    one of cert_file/key_file is provided.
    """
    cert_file = CONF.ssl.cert_file
    key_file = CONF.ssl.key_file
    ca_file = CONF.ssl.ca_file

    def _require_exists(path, message):
        # Raise with the (translatable) message when a configured path is absent.
        if path and not os.path.exists(path):
            raise RuntimeError(message % path)

    _require_exists(cert_file, _("Unable to find cert_file : %s"))
    _require_exists(ca_file, _("Unable to find ca_file : %s"))
    _require_exists(key_file, _("Unable to find key_file : %s"))

    enabled = cert_file or key_file
    if enabled and not (cert_file and key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))
    return enabled
def wrap(sock):
    """Wrap *sock* as a server-side SSL socket using the configured files.

    Client certificates are only required when a CA bundle is configured.
    NOTE(review): ssl.wrap_socket was deprecated in Python 3.7 and removed
    in 3.12; a migration to ssl.SSLContext would be needed on modern
    interpreters.
    """
    kwargs = {
        'server_side': True,
        'certfile': CONF.ssl.cert_file,
        'keyfile': CONF.ssl.key_file,
        # No client verification unless a CA file is set below.
        'cert_reqs': ssl.CERT_NONE,
    }
    ca_file = CONF.ssl.ca_file
    if ca_file:
        kwargs['ca_certs'] = ca_file
        kwargs['cert_reqs'] = ssl.CERT_REQUIRED
    return ssl.wrap_socket(sock, **kwargs)
# Map of user-facing protocol names to ssl module constants.
# NOTE(review): PROTOCOL_SSLv3 (and SSLv2 below) have been removed from the
# ssl module in modern Python/OpenSSL builds; this table assumes the old
# interpreter this file was written for.
_SSL_PROTOCOLS = {
    "tlsv1": ssl.PROTOCOL_TLSv1,
    "sslv23": ssl.PROTOCOL_SSLv23,
    "sslv3": ssl.PROTOCOL_SSLv3
}
# SSLv2 is only exposed when the underlying OpenSSL still supports it.
try:
    _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
    pass
def validate_ssl_version(version):
    """Translate a protocol name (case-insensitive) to its ssl constant.

    Raises RuntimeError for names not present in _SSL_PROTOCOLS.
    """
    normalized = version.lower()
    if normalized in _SSL_PROTOCOLS:
        return _SSL_PROTOCOLS[normalized]
    raise RuntimeError(_("Invalid SSL version : %s") % version)
|
from jinja2 import Environment, PackageLoader
import json
# Module-wide template environment; templates live in core_serializers/templates.
env = Environment(loader=PackageLoader('core_serializers', 'templates'))
class FormRenderer:
    """Render forms and individual fields to HTML via jinja2 templates."""

    template_name = 'form.html'

    def render_field(self, field_result, **options):
        """Render one (field, value, error) triple using the layout's template.

        The template is picked by field class name and the field's declared
        style; templates are resolved as 'fields/<layout>/<base>'.
        """
        field, value, error = field_result
        kind = field.__class__.__name__
        layout = options.get('layout', 'vertical')
        context = {}
        if kind == 'BooleanField':
            base = 'checkbox.html'
        elif kind == 'IntegerField':
            base = 'input.html'
            context = {'input_type': 'number'}
        elif kind == 'ChoiceField':
            base = 'select_radio.html' if field.style.get('type') == 'radio' else 'select.html'
        elif kind == 'MultipleChoiceField':
            base = ('select_checkbox.html' if field.style.get('type') == 'checkbox'
                    else 'select_multiple.html')
        elif field.style.get('type') == 'textarea' and layout != 'inline':
            # CharField, and anything unknown
            base = 'textarea.html'
        else:
            base = 'input.html'
            context = {'input_type': 'text'}
        template = env.get_template('fields/%s/%s' % (layout, base))
        return template.render(field=field, value=value, **context)

    def render(self, form, **options):
        """Render the whole form, taking the layout from form.Meta.style."""
        meta_style = getattr(getattr(form, 'Meta', None), 'style', {})
        layout = meta_style.get('layout', 'vertical')
        template = env.get_template(self.template_name)
        return template.render(form=form, renderer=self, layout=layout)
class JSONRenderer:
    """Serialize data to a JSON string with a configurable indent."""

    # Class-level default: compact output (no indentation).
    indent = None

    def __init__(self, indent=None):
        # Keep the class default unless an explicit indent was supplied.
        self.indent = indent if indent is not None else self.indent

    def render(self, data, **options):
        """Dump *data* as JSON; options['indent'] overrides the instance default."""
        return json.dumps(data, indent=options.get('indent', self.indent))
|
import csv
import tempfile
import os
from transaction import Transaction
class LastTransactionsParser:
    """Persist the most recent transaction list as a CSV cache in the temp dir."""

    # Cache file shared by read() and write().
    LAST_TRANSACTIONS_FILENAME = os.path.join(tempfile.gettempdir(), 'raapija_transactions_last.csv')

    @staticmethod
    def read():
        """Load cached transactions, or None when no cache file exists.

        Every CSV value comes back as a string and is passed to
        Transaction(**row) — assumes Transaction accepts string kwargs
        for all of its attributes (TODO confirm).
        """
        try:
            # newline='' is required by the csv module for correct
            # newline/quoting handling (see csv docs).
            with open(LastTransactionsParser.LAST_TRANSACTIONS_FILENAME, 'r',
                      encoding='utf-8', newline='') as csvfile:
                reader = csv.DictReader(csvfile)
                return [Transaction(**transaction_dict) for transaction_dict in reader]
        except FileNotFoundError:
            return None

    @staticmethod
    def write(transactions):
        """Overwrite the cache with *transactions* (a non-empty list).

        Fieldnames are taken from the first transaction's instance attributes.
        """
        # Bug fix: without newline='' the csv writer emits an extra blank
        # line between rows on Windows.
        with open(LastTransactionsParser.LAST_TRANSACTIONS_FILENAME, 'w',
                  encoding='utf-8', newline='') as csvfile:
            csv_fieldnames = transactions[0].__dict__.keys()
            writer = csv.DictWriter(csvfile, csv_fieldnames)
            writer.writeheader()
            for transaction in transactions:
                writer.writerow(transaction.__dict__)
|
#!/usr/bin/env python
# Copyright (c) 2015, Scott D. Peckham
#------------------------------------------------------
# S.D. Peckham
# July 9, 2015
#
# Tool to break CSDMS Standard Variable Names into
# all of their component parts, then save results in
# various formats. (e.g. Turtle TTL format)
#
# Example of use at a Unix prompt:
#
# % ./parse_csvn.py CSN_VarNames_v0.83.txt
#------------------------------------------------------
#
# Functions:
# parse_names()
#
#------------------------------------------------------
import os.path
import sys
#------------------------------------------------------
def parse_names( in_file='CSN_VarNames_v0.83.txt' ):
    """Parse CSDMS Standard Variable Names and write them as Turtle (TTL).

    Each input line holds one name of the form ``object__quantity``.  The
    object part splits on '_' (with '~'-separated adjectives); the quantity
    part splits on '_of_' (operations applied to a root quantity).  Output
    is a '.ttl' file next to *in_file* (same name, '.ttl' extension) plus
    'Root_Quantities.txt' (unique root quantities) in the current directory.

    Fixes vs. the original:
      * failed open() now returns instead of crashing with NameError;
      * the output name derives from in_file (was hard-coded, matching the
        commented-out intent); the default argument still yields the old name;
      * blank or malformed lines are skipped instead of raising IndexError;
      * print statements / xrange replaced by forms valid on Python 2 and 3.
    """
    #--------------------------------------------------
    # Open input file that contains copied names table
    #--------------------------------------------------
    try:
        in_unit = open( in_file, 'r' )
    except IOError:
        print('SORRY: Could not open TXT file named:')
        print('   ' + in_file)
        return
    #--------------------------------------------------
    # Name the TTL output file after the input file.
    #--------------------------------------------------
    pos = in_file.rfind('.')
    prefix = in_file[0:pos]
    out_file = prefix + '.ttl'
    if os.path.exists( out_file ):
        print('SORRY, A TTL file with the name')
        print('   ' + out_file)
        print('   already exists.')
        in_unit.close()
        return
    out_unit = open( out_file, 'w' )
    #------------------------
    # Write TTL file header
    #------------------------
    header_lines = [
        '@prefix dc: <http://purl.org/dc/elements/1.1/> .',
        '@prefix ns: <http://example.org/ns#> .',
        '@prefix vcard: <http://www.w3.org/2001/vcard-rdf/3.0#> .',
        '@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .',
        '@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .',
        '@prefix owl: <http://www.w3.org/2002/07/owl#> .',
        '@prefix csn: <http://ecgs.ncsa.illinois.edu/2015/csn#> .',
    ]
    for header in header_lines:
        out_unit.write( header + '\n' )
    out_unit.write( '\n' )    # (blank line)
    root_quan_list = list()   # (list to save all root quantities)
    #---------------------------
    # Parse all variable names
    #---------------------------
    indent = '    '   # (four spaces)
    n_names = 0
    for line in in_unit:
        line = line.strip()
        # Robustness: skip blank or malformed lines (no '__' separator);
        # the original crashed with IndexError on these.
        if (line == '') or ('__' not in line):
            continue
        #-----------------------------------------
        # Write entire variable name to TTL file
        #-----------------------------------------
        out_unit.write( '<csn:' + line + '>\n' )
        #--------------------------------------------------
        # Write object and quantity fullnames to TTL file
        #--------------------------------------------------
        main_parts = line.split('__')
        object_part = main_parts[0]
        quantity_part = main_parts[1]
        out_unit.write( indent + 'a csn:name ;\n' )
        out_unit.write( indent + "csn:object_fullname '" + object_part + "' ;\n" )
        out_unit.write( indent + "csn:quantity_fullname '" + quantity_part + "' ;\n" )
        #---------------------------------------------
        # Write parts of object_fullname to TTL file
        #---------------------------------------------
        object_list = object_part.split('_')
        for k in range( len(object_list) ):
            obj = object_list[k]
            obj_prefix = indent + "csn:object" + str(k+1)
            out_unit.write( obj_prefix + " '" + obj + "' ;\n" )
            # First '~'-separated entry is the object; the rest are adjectives.
            adj_list = obj.split('~')
            for j in range( len(adj_list) - 1 ):
                adj_prefix = obj_prefix + "_adjective" + str(j+1)
                out_unit.write( adj_prefix + " '" + adj_list[j+1] + "' ;\n" )
        #-------------------------------------
        # Write root object name to TTL file
        #-------------------------------------
        root_object = object_list[-1]   # (last object in list)
        out_unit.write( indent + "csn:root_object '" + root_object + "' ;\n" )
        #--------------------------------------------------------
        # Write all operations in quantity_fullname to TTL file
        #--------------------------------------------------------
        operation_list = quantity_part.split('_of_')
        # Last entry in operation_list is the quantity itself.
        for k in range( len(operation_list) - 1 ):
            op_prefix = indent + "csn:operation" + str(k+1)
            out_unit.write( op_prefix + " '" + operation_list[k] + "' ;\n" )
        #----------------------------------
        # Write quantity name to TTL file
        #----------------------------------
        quantity = operation_list[-1]
        out_unit.write( indent + "csn:quantity '" + quantity + "' ;\n" )
        #---------------------------------------
        # Write root quantity name to TTL file
        # (Notice "." vs. ";" here: it ends the triple block.)
        #---------------------------------------
        root_quantity = quantity.split('_')[-1]
        out_unit.write( indent + "csn:root_quantity '" + root_quantity + "' .\n" )
        out_unit.write( '\n' )   # (blank line)
        root_quan_list.append( root_quantity )
        n_names += 1
    #-------------------------------------
    # Close the input and TTL output files
    #-------------------------------------
    in_unit.close()
    out_unit.close()
    print('Finished writing CSN var names as TTL.')
    print('Number of names = ' + str(n_names) + ' .')
    print(' ')
    #-----------------------------------------
    # Write unique root quantities to a file
    #-----------------------------------------
    uniq_root_quan_list = sorted( set(root_quan_list) )
    root_quan_unit = open( 'Root_Quantities.txt', 'w' )
    for root_quantity in uniq_root_quan_list:
        root_quan_unit.write( root_quantity + '\n' )
    root_quan_unit.close()
    print('Number of root quantities = ' + str(len(uniq_root_quan_list)) + ' .')
    print(' ')
#------------------------------------------------------
if (__name__ == "__main__"):
    #-----------------------------------------------------
    # Note: First arg in sys.argv is the command itself.
    # print statements converted to the function form so the
    # entry point is valid on both Python 2 and 3.
    #-----------------------------------------------------
    n_args = len(sys.argv)
    if (n_args < 2):
        print('ERROR: This tool requires an input')
        print('       text file argument.')
        print('sys.argv = ' + str(sys.argv))
        print(' ')
    elif (n_args == 2):
        parse_names( sys.argv[1] )
    else:
        print('ERROR: Invalid number of arguments.')
#-----------------------------------------------------------------------
|
from flask import Flask, Response
import twilio.twiml
# Module-level WSGI application; the /voice route is registered below.
app = Flask(__name__)
@app.route("/voice", methods=['POST'])
def get_voice_twiml():
    """Answer an incoming Twilio voice call with a short spoken message."""
    twiml = twilio.twiml.Response()
    twiml.say("Thanks for calling!")
    return Response(str(twiml), mimetype='text/xml')
|
if __name__ == "__main__":
    # Development server only; debug=True enables the reloader/debugger.
    app.run(debug=True)
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add CycleTaskGroupObject.object
Revision ID: 26d9c9c91542
Revises: 19a67dc67c3
Create Date: 2014-07-15 21:49:34.073412
"""
# revision identifiers, used by Alembic.
revision = '26d9c9c91542'        # this migration's id
down_revision = '19a67dc67c3'    # parent migration this one builds on
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add object_id/object_type to cycle_task_group_objects and backfill them."""
    # Columns are NOT NULL from the start; the UPDATE below fills existing
    # rows from the linked task_group_objects record.
    # NOTE(review): on strict-mode MySQL, adding a NOT NULL column without a
    # server default can fail on a non-empty table — presumably acceptable
    # for the deployments this targeted.
    op.add_column('cycle_task_group_objects', sa.Column('object_id', sa.Integer(), nullable=False))
    op.add_column('cycle_task_group_objects', sa.Column('object_type', sa.String(length=250), nullable=False))
    # Multi-table UPDATE (MySQL syntax): copy the polymorphic target
    # (object_id, object_type) from the joined task_group_objects row.
    op.execute('''
      UPDATE cycle_task_group_objects
      JOIN task_group_objects
      ON cycle_task_group_objects.task_group_object_id = task_group_objects.id
      SET
      cycle_task_group_objects.object_id = task_group_objects.object_id,
      cycle_task_group_objects.object_type = task_group_objects.object_type;
  ''')
def downgrade():
    """Drop the columns added by upgrade(); the backfilled data is lost."""
    op.drop_column('cycle_task_group_objects', 'object_type')
    op.drop_column('cycle_task_group_objects', 'object_id')
|
######################################
# Experiment working tree (machine-specific absolute path) and the column
# separator used when emitting feature files.
# NOTE(review): this file uses os/re/logging/pdb below but no imports are
# visible in this chunk — presumably they exist elsewhere; verify.
root_dir = '/home/linlin/time/0903_classify_false_start/1003_raw_features/'
separator = '\t\t'
################################################################
def MakeNewFolderVersionHigher(data_directory, dir_name):
    """Create a new, higher-versioned folder next to *data_directory*.

    Scans the parent directory of *data_directory* for entries that contain
    *dir_name* and carry a 'Dir_<N>' suffix, then creates
    ``<dir_name>_<max(N)+1>`` (starting at '_2') and returns its full path.

    data_directory -- may be a relative path; only its parent dir is scanned.
    dir_name       -- base name of the folder to create (version detection
                      relies on it containing 'Dir', per the regex below).
    """
    abs_data_directory = os.path.abspath(os.path.dirname(data_directory))
    version_number = 1
    for entry in os.listdir(abs_data_directory):
        if dir_name not in entry:
            continue
        version_str = re.findall(r'Dir_\d+', entry)
        # Bug fix: entries containing dir_name but no 'Dir_<N>' suffix used
        # to raise IndexError on version_str[-1]; skip them instead.
        if not version_str:
            continue
        number_str = version_str[-1][4:]
        if number_str.isdigit():
            number = int(number_str)
            if number > version_number:
                version_number = number
    new_folder_name = dir_name + "_%d" % (version_number + 1)
    folderFullPath = os.path.join(abs_data_directory, new_folder_name)
    os.makedirs(folderFullPath)
    return folderFullPath
#########################################################
# Module-level setup: creates a fresh versioned output folder on import.
# NOTE(review): root_dir is an absolute, machine-specific path, so this
# script only runs where that tree exists.
output_root_dir = MakeNewFolderVersionHigher(root_dir, 'processDir' )
data_dir = root_dir + 'data1'
code_dir = root_dir + 'src/'
##############################################################
def DirProcessing(source_path, dest_path):
    """Standardize every file found under *source_path* into *dest_path*."""
    for root, dirs, files in os.walk(source_path):
        for name in files:
            full_path = os.path.join(root, name)
            logger.debug("Visited one file!")
            Standardize(full_path, dest_path, ' ')
def DirProcessingForSSR(source_path, dest_path):
    """Run GetSsrFeature over every file found under *source_path*."""
    for root, dirs, files in os.walk(source_path):
        for name in files:
            full_path = os.path.join(root, name)
            logger.debug("Visited one file!")
            GetSsrFeature(full_path, dest_path, '\t')
def GetAttributes(source_path, dest_path):
    """Pipe each file under *source_path* through the chunker script.

    Output goes to <dest_path>/<basename>.crfsuite via a shell pipeline.
    """
    ################################################################
    script_file = code_dir + 'chunker6_only_ssr_repetition.py'
    ################################################################
    for root, dirs, files in os.walk(source_path):
        for name in files:
            full_path = os.path.join(root, name)
            logger.debug("Visited one file!")
            crf_path = dest_path + '/' + os.path.basename(full_path) + '.crfsuite'
            # NOTE(review): command built by string concatenation; paths with
            # spaces or shell metacharacters would break or be unsafe.
            os.system('cat ' + full_path + ' | python ' + script_file + " > " + crf_path)
def RunClassifier(source_path, dest_path):
    """Find tr.txt/te.txt under *source_path* and run `crfsuite learn` on them.

    NOTE(review): if no file matching 'tr.txt' (or 'te.txt') exists, the
    corresponding name is unbound and this raises NameError — behavior
    preserved from the original.
    """
    for root, dirs, files in os.walk(source_path):
        for name in files:
            if 'tr.txt' in name:
                train_path = os.path.join(root, name)
            elif 'te.txt' in name:
                test_path = os.path.join(root, name)
    #pdb.set_trace()
    result_path = dest_path + '/' + 'result.txt'
    os.system('crfsuite learn -e2 ' + train_path + " " + test_path + " > " + result_path)
def FindNeighborTokenSubscript(first_token_list, current_pos, up_or_down):
    """Return the index of the nearest non-empty entry beside *current_pos*.

    up_or_down: +1 searches forward, -1 searches backward.  Assumes a
    non-empty entry exists before the list boundary; otherwise IndexError
    (or Python's negative-index wraparound) results, as in the original.
    Returns None for any other *up_or_down* value (preserved behavior).
    """
    if up_or_down not in (1, -1):
        return None
    offset = 1
    while len(first_token_list[current_pos + up_or_down * offset]) < 1:
        offset += 1
    return current_pos + up_or_down * offset
def Standardize(path, dest_dir, sep):
    """Convert one raw annotation file into the standardized feature format.

    Writes <dest_dir>/<basename(path)>.standard with one output line per
    input line: label, token, POS, word, sem, partial-word flag and four
    token-repetition features, joined by *sep*.  Input lines shorter than
    13 characters become empty output lines.  Assumes data lines have at
    least 5 whitespace-separated columns (word_list[0..4]).
    """
    #####################################
    # Suffix appended to "positive" feature values; presumably a crfsuite
    # attribute weight (feature:5) — TODO confirm.
    scale_str = ':5'
    #####################################
    output_path = dest_dir+ '/' + os.path.basename(path) + '.standard'
    output_file_obj = open(output_path,'w')
    file_obj = open(path)
    line_list = file_obj.readlines()
    # Pass 1: first whitespace token of each line ('' for short/blank lines).
    token_list = []
    for j in range(len(line_list)):
        word_list = line_list[j].split()
        if len(word_list) < 2:
            token_list.append('')
        else:
            token_list.append(word_list[0])
    # Pass 2: for each token, compare against the two nearest non-empty
    # tokens on each side to build the repetition features.
    repetition_vec_list = []
    for i in range(len(line_list)):
        if len(token_list[i]) == 0:
            repetition_vec_list.append('')
        else:
            if i < 4 or i > len(line_list)- 5:
                # Too close to either file edge to look 2 tokens both ways.
                repetition_vec_list.append(['diff', 'diff','diff', 'diff'])
            else:
                previous_subscript = FindNeighborTokenSubscript(token_list, i, -1)
                prev_prev_subscript = FindNeighborTokenSubscript(token_list, previous_subscript, -1)
                next_subscript = FindNeighborTokenSubscript(token_list, i, 1)
                next_next_subscript = FindNeighborTokenSubscript(token_list, next_subscript, 1)
                prev_prev_label = 'same'+scale_str if (token_list[i] == token_list[prev_prev_subscript]) else "diff"
                prev_label = 'same'+scale_str if (token_list[i] == token_list[previous_subscript]) else "diff"
                next_label = 'same'+scale_str if (token_list[i] == token_list[next_subscript]) else "diff"
                # NOTE(review): next_next_subscript (an index) is re-used here
                # to hold the label string; output is as intended, but the
                # name is misleading.
                next_next_subscript = 'same'+scale_str if (token_list[i] == token_list[next_next_subscript]) else "diff"
                repetition_vec_list.append([prev_prev_label, prev_label, next_label, next_next_subscript])
    # Pass 3: derive the class label from column 4 and emit output lines.
    for k in range(len(line_list)):
        line = line_list[k]
        if len(line)<13:
            label = ''
        else:
            word_list = line.split()
            # Substring matching covers inflected annotation tags
            # (e.g. 'nsert' matches insert/inserted, 'elete' matches delete).
            if 'filler' in word_list[4]:
                label = 'filler'
            elif 'repeat' in word_list[4] or 'nsert' in word_list[4]:
                label = 'repeat'
            elif 'restart' in word_list[4] or 'extraneou' in word_list[4]:
                label = 'false_start'
            elif 'elete' in word_list[4]:
                label = 'other'
            else:
                label = 'OK'
            # '-' in the token marks a partial word ('patial' sic, kept as-is
            # since it is a runtime feature string).
            if '-' in word_list[0]:
                patial = 'patial'+scale_str
            else:
                patial = 'nonpatial'
            label = label
            token = word_list[0]
            pos = word_list[1]
            word = word_list[2]
            sem = word_list[3]
            patial = patial
            #pdb.set_trace()
            pp = repetition_vec_list[k][0]
            p = repetition_vec_list[k][1]
            n = repetition_vec_list[k][2]
            nn = repetition_vec_list[k][3]
        #pdb.set_trace()
        if len(line)<13:
            line_format = ''
        else:
            line_format = (
                "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
                %(label, sep, token,sep,pos, sep,word,sep,sem, sep, patial, sep,
                  pp, sep, p, sep, n,sep, nn))
        output_file_obj.write(line_format)
        output_file_obj.write('\n')
    output_file_obj.close()
    file_obj.close()
def GetSsrFeature(path, dest_dir, sep):
    """Convert a fixed-column SSR file into *sep*-separated features.

    Columns: token [0:15], POS [15:25], word [25:40], sem [40:54]; the
    remainder of the line is whitespace-joined with '_' and used as the
    label.  Output goes to <dest_dir>/<basename(path)>.noSpace, one line
    per input line (lines shorter than 3 chars become empty lines).
    """
    output_path = dest_dir + '/' + os.path.basename(path) + '.noSpace'
    # Output is opened first, as in the original, so the .noSpace file is
    # created even if reading the source subsequently fails.
    with open(output_path, 'w') as sink, open(path) as source:
        for line in source:
            if len(line) < 3:
                sink.write('\n')
                continue
            token = line[:15].strip()
            pos = line[15:25].strip()
            word = line[25:40].strip()
            sem = line[40:54].strip()
            label = '_'.join(line[54:].split())
            sink.write(sep.join((token, pos, word, sem, label)) + sep + '\n')
if __name__ == '__main__':
logFile = output_root_dir + "/logFile.txt"
logging.basicConfig(filename=logFile, level = logging.DEBUG)
os.makedirs(output_root_dir + "/standardStep1")
dest_dir = output_root_dir + "/standardStep1"
DirProcessing(data_dir, dest_dir)
# os.makedirs(output_root_dir + "/standardStep2") #
# dest_dir = output_root_dir + "/standardStep2"
# DirProcessing(data_dir, dest_dir) #
os.makedirs(output_root_dir + "/attributesStep3")
attr_dir = output_root_dir + "/attributesStep3"
GetAttributes(dest_dir, attr_dir)
os.makedirs(output_root_dir + "/classificationStep4")
result_dir = output |
imp | ort pymark
pets_mod = pymark.unpack_file("pets_two.pmk")
print "TypeID: %i" % pets_mod["pets"]["catherine"]["type"]
print "Name: %s" % pets_mod["pets"]["catherine"]["name"]
print "Color: (%i, %i, %i)" % pets_mod["pets"]["catherine"]["col | or"]
|
"""
Support for real-time departure information for public transport in Munich.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mvglive/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, ATTR_ATTRIBUTION, STATE_UNKNOWN
)
REQUIREMENTS = ['PyMVGLive==1.1.4']
_LOGGER = logging.getLogger(__name__)
# Configuration keys for each watched departure entry.
CONF_NEXT_DEPARTURE = 'nextdeparture'
CONF_STATION = 'station'
CONF_DESTINATIONS = 'destinations'
CONF_DIRECTIONS = 'directions'
CONF_LINES = 'lines'
CONF_PRODUCTS = 'products'
CONF_TIMEOFFSET = 'timeoffset'
# Transport types queried when the user does not narrow 'products'.
DEFAULT_PRODUCT = ['U-Bahn', 'Tram', 'Bus', 'S-Bahn']
# Frontend icon per transport product; '-' is the "no departure" fallback.
ICONS = {
    'U-Bahn': 'mdi:subway',
    'Tram': 'mdi:tram',
    'Bus': 'mdi:bus',
    'S-Bahn': 'mdi:train',
    'SEV': 'mdi:checkbox-blank-circle-outline',
    '-': 'mdi:clock'
}
ATTRIBUTION = "Data provided by MVG-live.de"
SCAN_INTERVAL = timedelta(seconds=30)
# Each 'nextdeparture' entry configures one sensor.  The list options take
# comma-separated strings and default to [''], which means "no filter".
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NEXT_DEPARTURE): [{
        vol.Required(CONF_STATION): cv.string,
        vol.Optional(CONF_DESTINATIONS, default=['']): cv.ensure_list_csv,
        vol.Optional(CONF_DIRECTIONS, default=['']): cv.ensure_list_csv,
        vol.Optional(CONF_LINES, default=['']): cv.ensure_list_csv,
        vol.Optional(CONF_PRODUCTS, default=DEFAULT_PRODUCT):
            cv.ensure_list_csv,
        vol.Optional(CONF_TIMEOFFSET, default=0): cv.positive_int,
        vol.Optional(CONF_NAME): cv.string}]
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up one MVGLive sensor per configured 'nextdeparture' entry."""
    sensors = [
        MVGLiveSensor(
            entry.get(CONF_STATION),
            entry.get(CONF_DESTINATIONS),
            entry.get(CONF_DIRECTIONS),
            entry.get(CONF_LINES),
            entry.get(CONF_PRODUCTS),
            entry.get(CONF_TIMEOFFSET),
            entry.get(CONF_NAME))
        for entry in config.get(CONF_NEXT_DEPARTURE)]
    add_devices(sensors, True)
# pylint: disable=too-few-public-methods
class MVGLiveSensor(Entity):
    """Implementation of an MVG Live sensor."""

    def __init__(self, station, destinations, directions,
                 lines, products, timeoffset, name):
        """Initialize the sensor."""
        self._station = station
        self._name = name
        self.data = MVGLiveData(station, destinations, directions,
                                lines, products, timeoffset)
        self._state = STATE_UNKNOWN
        self._icon = ICONS['-']

    @property
    def name(self):
        """Return the configured name, falling back to the station."""
        return self._name or self._station

    @property
    def state(self):
        """Return the next departure time."""
        return self._state

    @property
    def state_attributes(self):
        """Return the state attributes."""
        return self.data.departures

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return "min"

    def update(self):
        """Get the latest data and update the state."""
        self.data.update()
        departures = self.data.departures
        if departures:
            self._state = departures.get('time', '-')
            self._icon = ICONS[departures.get('product', '-')]
        else:
            self._state = '-'
            self._icon = ICONS['-']
class MVGLiveData(object):
    """Pull departure data from the mvg-live.de web page."""

    def __init__(self, station, destinations, directions,
                 lines, products, timeoffset):
        """Initialize the data object."""
        import MVGLive
        self._station = station
        self._destinations = destinations
        self._directions = directions
        self._lines = lines
        self._products = products
        self._timeoffset = timeoffset
        # Booleans telling the backend which transport types to query.
        self._include_ubahn = 'U-Bahn' in self._products
        self._include_tram = 'Tram' in self._products
        self._include_bus = 'Bus' in self._products
        self._include_sbahn = 'S-Bahn' in self._products
        self.mvg = MVGLive.MVGLive()
        self.departures = {}

    def update(self):
        """Update the connection data."""
        try:
            candidates = self.mvg.getlivedata(
                station=self._station, ubahn=self._include_ubahn,
                tram=self._include_tram, bus=self._include_bus,
                sbahn=self._include_sbahn)
        except ValueError:
            self.departures = {}
            _LOGGER.warning("Returned data not understood")
            return
        for candidate in candidates:
            # Skip entries that fail any configured filter; a list whose
            # first element is '' means "no filter" for that option.
            if ('' not in self._destinations[:1] and
                    candidate['destination'] not in self._destinations):
                continue
            if ('' not in self._directions[:1] and
                    candidate['direction'] not in self._directions):
                continue
            if ('' not in self._lines[:1] and
                    candidate['linename'] not in self._lines):
                continue
            if candidate['time'] < self._timeoffset:
                continue
            # First match wins: publish it and stop scanning.
            nextdep = {ATTR_ATTRIBUTION: ATTRIBUTION}
            for key in ('destination', 'linename', 'time', 'direction',
                        'product'):
                nextdep[key] = candidate.get(key, '')
            nextdep['time'] = int(nextdep['time'])
            self.departures = nextdep
            break
|
im | port inspect
import example
print(inspect.getsource(example.A.get | _name))
|
'''
cloudelements: tests module.
Meant for use with py.test.
Organize | tests into files, each named xxx_test.py
Read more here: http://pytest.o | rg/
Copyright 2015, LeadGenius
Licensed under MIT
''' |
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is | part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either ve | rsion 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
class Usecase:
    """Create a new, empty IFC entity of the class named in the settings."""

    def __init__(self, file, **settings):
        self.file = file
        # Start from the documented defaults, then overlay caller settings.
        self.settings = {"ifc_class": None}
        self.settings.update(settings)

    def execute(self):
        return self.file.create_entity(self.settings["ifc_class"])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.