| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M) |
|---|---|---|---|---|---|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of retrying logic."""
import collections
import functools
import itertools
import random
import sys
import time
_DEFAULT_JITTER_MS = 1000
class RetryerState(object):
"""Object that holds the state of the retryer."""
def __init__(self, retrial, time_passed_ms, time_to_wait_ms):
"""Initializer for RetryerState.
Args:
retrial: int, the retry attempt we are currently at.
time_passed_ms: int, number of ms that passed since we started retryer.
time_to_wait_ms: int, number of ms to wait until the next trial.
If this number is -1, it means the iterable that specifies the
next sleep value has raised StopIteration.
"""
self.retrial = retrial
self.time_passed_ms = time_passed_ms
self.time_to_wait_ms = time_to_wait_ms
class RetryException(Exception):
"""Raised to stop retrials on failure."""
def __init__(self, message, last_result, state):
self.message = message
self.last_result = last_result
self.state = state
super(RetryException, self).__init__(message)
def __str__(self):
return ('last_result={last_result}, last_retrial={last_retrial}, '
'time_passed_ms={time_passed_ms}, '
'time_to_wait={time_to_wait_ms}'.format(
last_result=self.last_result,
last_retrial=self.state.retrial,
time_passed_ms=self.state.time_passed_ms,
time_to_wait_ms=self.state.time_to_wait_ms))
class WaitException(RetryException):
"""Raised when timeout was reached."""
class MaxRetrialsException(RetryException):
"""Raised when too many retrials reached."""
class Retryer(object):
"""Retries a function based on specified retry strategy."""
def __init__(self, max_retrials=None, max_wait_ms=None,
exponential_sleep_multiplier=None, jitter_ms=_DEFAULT_JITTER_MS,
status_update_func=None, wait_ceiling_ms=None):
"""Initializer for Retryer.
Args:
max_retrials: int, max number of retrials before raising RetryException.
max_wait_ms: int, total number of ms to wait before raising WaitException.
exponential_sleep_multiplier: float, The exponential factor to use on
subsequent retries.
jitter_ms: int, random [0, jitter_ms] additional value to wait.
status_update_func: func(result, state) called right after each trial.
wait_ceiling_ms: int, maximum wait time between retries, regardless of
modifiers added like exponential multiplier or jitter.
"""
self._max_retrials = max_retrials
self._max_wait_ms = max_wait_ms
self._exponential_sleep_multiplier = exponential_sleep_multiplier
self._jitter_ms = jitter_ms
self._status_update_func = status_update_func
self._wait_ceiling_ms = wait_ceiling_ms
def _RaiseIfStop(self, result, state):
if self._max_retrials is not None and self._max_retrials <= state.retrial:
raise MaxRetrialsException('Reached', result, state)
if self._max_wait_ms is not None:
if state.time_passed_ms + state.time_to_wait_ms > self._max_wait_ms:
raise WaitException('Timeout', result, state)
def _GetTimeToWait(self, last_retrial, sleep_ms):
"""Get time to wait after applying modifyers.
Apply the exponential sleep multiplyer, jitter and ceiling limiting to the
base sleep time.
Args:
last_retrial: int, which retry attempt we just tried. First try this is 0.
sleep_ms: int, how long to wait between the current trials.
Returns:
int, ms to wait before trying next attempt with all waiting logic applied.
"""
wait_time_ms = sleep_ms
if wait_time_ms:
if self._exponential_sleep_multiplier:
wait_time_ms *= self._exponential_sleep_multiplier ** last_retrial
if self._jitter_ms:
wait_time_ms += random.random() * self._jitter_ms
if self._wait_ceiling_ms:
wait_time_ms = min(wait_time_ms, self._wait_ceiling_ms)
return wait_time_ms
return 0
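# As an illustration of the arithmetic above (a sketch, not part of the
# original module): with sleep_ms=1000, exponential_sleep_multiplier=2,
# jitter_ms=0 and wait_ceiling_ms=5000, the waits returned for retrials
# 0..3 would be 1000, 2000, 4000 and 5000 ms (8000 capped by the ceiling).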
def RetryOnException(self, func, args=None, kwargs=None,
should_retry_if=None, sleep_ms=None):
"""Retries the function if an exception occurs.
Args:
func: The function to call and retry.
args: a sequence of positional arguments to be passed to func.
kwargs: a dictionary of keyword arguments to be passed to func.
should_retry_if: func(exc_type, exc_value, exc_traceback, state) that
returns True or False.
sleep_ms: int or iterable for how long to wait between trials.
Returns:
Whatever the function returns.
Raises:
RetryException, WaitException: if the function is retried too many times,
or the time limit is reached.
"""
args = args if args is not None else ()
kwargs = kwargs if kwargs is not None else {}
def TryFunc():
try:
return func(*args, **kwargs), None
except: # pylint: disable=bare-except
return None, sys.exc_info()
if should_retry_if is None:
should_retry = lambda x, s: x[1] is not None
else:
def ShouldRetryFunc(try_func_result, state):
exc_info = try_func_result[1]
if exc_info is None:
# No exception, no reason to retry.
return False
return should_retry_if(exc_info[0], exc_info[1], exc_info[2], state)
should_retry = ShouldRetryFunc
result, exc_info = self.RetryOnResult(
TryFunc, should_retry_if=should_retry, sleep_ms=sleep_ms)
if exc_info:
# Exception that was not retried was raised. Re-raise.
raise exc_info[0], exc_info[1], exc_info[2]
return result
def RetryOnResult(self, func, args=None, kwargs=None,
should_retry_if=None, sleep_ms=None):
"""Retries the function if the given condition is satisfied.
Args:
func: The function to call and retry.
args: a sequence of arguments to be passed to func.
kwargs: a dictionary of keyword arguments to be passed to func.
should_retry_if: a result value to retry on, or func(result, RetryerState)
that returns True if we should retry and False otherwise.
sleep_ms: int or iterable, for how long to wait between trials.
Returns:
Whatever the function returns.
Raises:
MaxRetrialsException: function retried too many times.
WaitException: time limit is reached.
"""
args = args if args is not None else ()
kwargs = kwargs if kwargs is not None else {}
start_time_ms = _GetCurrentTimeMs()
retrial = 0
if callable(should_retry_if):
should_retry = should_retry_if
else:
should_retry = lambda x, s: x == should_retry_if
if isinstance(sleep_ms, collections.Iterable):
sleep_gen = iter(sleep_ms)
else:
sleep_gen = itertools.repeat(sleep_ms)
while True:
result = func(*args, **kwargs)
time_passed_ms = _GetCurrentTimeMs() - start_time_ms
try:
sleep_from_gen = sleep_gen.next()
except StopIteration:
time_to_wait_ms = -1
else:
time_to_wait_ms = self._GetTimeToWait(retrial, sleep_from_gen)
state = RetryerState(retrial, time_passed_ms, time_to_wait_ms)
if not should_retry(result, state):
return result
if time_to_wait_ms == -1:
raise MaxRetrialsException('Sleep iteration stop', result, state)
if self._status_update_func:
self._status_update_func(result, state)
self._RaiseIfStop(result, state)
_SleepMs(time_to_wait_ms)
retrial += 1
def RetryOnException(f=None, max_retrials=None, max_wait_ms=None,
sleep_ms=None, exponential_sleep_multiplier=None,
jitter_ms=_DEFAULT_JITTER_MS,
status_update_func=None,
should_retry_if=None):
"""A decorator to retry on exceptions.
Args:
f: a function to run possibly multiple times
max_retrials: int, max number of retrials before raising RetryException.
max_wait_ms: int, total number of ms to wait before raising WaitException.
sleep_ms: int or iterable, for how long to wait between trials.
exponential_sleep_multiplier: float, The exponential factor to use on
subsequent retries.
jitter_ms: int, random [0, jitter_ms] additional value to wait.
status_update_func: func(result, state) called right after each trial.
should_retry_if: func(exc_type, exc_value, exc_traceback, state) that
returns True or False.
Returns:
A version of f that is executed potentially multiple times and that
yields the first returned value or the last exception raised.
"""
if f is None:
# Returns a decorator based on RetryOnException with max_retrials,
# max_wait_ms, sleep_ms, etc. fixed.
return functools.partial(
RetryOnException,
exponential_sleep_multiplier=exponential_sleep_multiplier,
jitter_ms=jitter_ms,
max_retrials=max_retrials,
max_wait_ms=max_wait_ms,
should_retry_if=should_retry_if,
sleep_ms=sleep_ms,
status_update_func=status_update_func)
@functools.wraps(f)
def DecoratedFunction(*args, **kwargs):
retryer = Retryer(
max_retrials=max_retrials,
max_wait_ms=max_wait_ms,
exponential_sleep_multiplier=exponential_sleep_multiplier,
jitter_ms=jitter_ms,
status_update_func=status_update_func)
try:
return retryer.RetryOnException(f, args=args, kwargs=kwargs,
should_retry_if=should_retry_if,
sleep_ms=sleep_ms)
except MaxRetrialsException as mre:
to_reraise = mre.last_result[1]
raise to_reraise[0], to_reraise[1], to_reraise[2]
return DecoratedFunction
def _GetCurrentTimeMs():
return int(time.time() * 1000)
def _SleepMs(time_to_wait_ms):
time.sleep(time_to_wait_ms / 1000.0)
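# Example usage (a minimal sketch, not part of the original module; the
# function _FetchStatus, its argument and the 'PENDING' value are
# hypothetical):
#
#   retryer = Retryer(max_retrials=5, exponential_sleep_multiplier=2,
#                     wait_ceiling_ms=10000)
#   status = retryer.RetryOnResult(_FetchStatus, args=('job-1',),
#                                  should_retry_if='PENDING', sleep_ms=500)
#
#   @RetryOnException(max_retrials=3, sleep_ms=1000)
#   def _FetchStatus(job_id):
#     ...  # raises on transient errors, returns the job status otherwise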
| Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/core/util/retry.py | Python | mit | 10,477 |
import web
from . import csrf_protected, db, get_session, require_login, render, userRolesByName
from app.tools.pagination2 import doquery, countquery, getPaginationString
from app.tools.utils import default, lit
from settings import PAGE_LIMIT
class Users:
@require_login
def GET(self):
params = web.input(page=1, ed="", d_id="")
edit_val = params.ed
session = get_session()
try:
page = int(params.page)
except:
page = 1
limit = PAGE_LIMIT
start = (page - 1) * limit if page > 0 else 0
if params.ed:
r = db.query(
"SELECT a.id, a.firstname, a.lastname, a.username, a.email, a.telephone, "
"a.is_active, b.id as role, b.name role_name "
"FROM users a, user_roles b "
"WHERE a.id = $id AND a.user_role = b.id", {'id': params.ed})
if r and (session.role == 'Administrator' or '%s' % session.sesid == edit_val):
u = r[0]
firstname = u.firstname
lastname = u.lastname
telephone = u.telephone
email = u.email
username = u.username
user_role = u.role
role_name = u.role_name
is_active = u.is_active
is_super = u.role_name == 'Administrator'
if params.d_id:
if session.role == 'Administrator':
db.query("DELETE FROM users WHERE id=$id", {'id': params.d_id})
roles = db.query("SELECT id, name FROM user_roles ORDER by name")
current_role_id = userRolesByName[session.role]
criteria = ""
if session.role == 'Administrator':
dic = lit(
relations='users a, user_roles b',
fields="a.id, a.firstname, a.lastname, a.username, a.email, a.telephone, b.name as role ",
criteria="a.user_role = b.id",
order="a.firstname, a.lastname",
limit=limit, offset=start)
else:
dic = lit(
relations='users a, user_roles b',
fields="a.id, a.firstname, a.lastname, a.username, a.email, a.telephone, b.name as role ",
criteria="a.user_role = b.id AND a.id=%s" % session.sesid,
order="a.firstname, a.lastname",
limit=limit, offset=start)
users = doquery(db, dic)
count = countquery(db, dic)
pagination_str = getPaginationString(default(page, 0), count, limit, 2, "users", "?page=")
l = locals()
del l['self']
return render.users(**l)
@csrf_protected
def POST(self):
params = web.input(
firstname="", lastname="", telephone="", username="", email="", passwd="",
cpasswd="", is_active="", is_super="", page="1", ed="", d_id="", user_role="")
try:
page = int(params.page)
except:
page = 1
is_active = 't' if params.is_active == "on" else 'f'
# role = 'Administrator' if params.is_super == "on" else 'Basic'
with db.transaction():
if params.ed:
db.query(
"UPDATE users SET firstname=$firstname, lastname=$lastname, "
"telephone=$telephone, email=$email, username=$username, "
"password = crypt($cpasswd, gen_salt('bf')), "
"is_active=$is_active, "
"user_role=$role "
"WHERE id = $id", {
'firstname': params.firstname, 'lastname': params.lastname,
'telephone': params.telephone, 'email': params.email,
'username': params.username, 'cpasswd': params.cpasswd,
'role': params.user_role, 'is_active': is_active, 'id': params.ed
}
)
return web.seeother("/users")
else:
db.query(
"INSERT INTO users (firstname, lastname, telephone, email, "
"username, password, is_active, user_role) "
"VALUES($firstname, $lastname, $telephone, $email, $username, "
"crypt($cpasswd, gen_salt('bf')), $is_active, "
"$role)", {
'firstname': params.firstname, 'lastname': params.lastname,
'telephone': params.telephone, 'email': params.email,
'username': params.username, 'cpasswd': params.cpasswd,
'role': params.user_role, 'is_active': is_active, 'id': params.ed
}
)
return web.seeother("/users")
l = locals()
del l['self']
return render.users(**l)
| sekiskylink/dispatcher-2.1 | web/app/controllers/users_handler.py | Python | gpl-3.0 | 4,860 |
import os
import pytest
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
SITE_DIR = os.path.join(BASE_DIR, "site")
@pytest.fixture
def site_dir():
return SITE_DIR
@pytest.fixture
def output_exist():
return lambda path: os.path.exists(os.path.join(SITE_DIR, "deploy", path))
@pytest.fixture(autouse=True)
def chdir():
from catsup.options import g
os.chdir(SITE_DIR)
g.cwdpath = SITE_DIR
| whtsky/Catsup | tests/conftest.py | Python | mit | 419 |
#!/afs/bx.psu.edu/project/pythons/py2.7-linux-x86_64-ucs4/bin/python2.7
"""
Read a MAF and print counts and frequencies of all n-mers
(words composed of n consecutive alignment columns)
TODO: reconcile this and maf_mapping_word_frequency.py
usage: %prog n < maf_file
"""
from __future__ import division
import psyco; psyco.profile()
from bx.cookbook import doc_optparse
import string
import sys
from align import maf
def __main__():
motif_len = int( sys.argv[1] )
big_map = {}
total = 0
maf_reader = maf.Reader( sys.stdin )
for m in maf_reader:
texts = [ c.text.upper() for c in m.components ]
for i in range( m.text_size - motif_len ):
motif = string.join( [ text[ i : i + motif_len ] for text in texts ] )
if big_map.has_key( motif ): big_map[ motif ] += 1
else: big_map[ motif ] = 1
total += 1
items = zip( big_map.values(), big_map.keys() )
items.sort()
items.reverse()
for count, motif in items:
print "%d\t%0.10f\t%s" % ( count, count / total, motif )
if __name__ == "__main__": __main__()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/bx_python-0.7.2-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/maf_word_frequency.py | Python | gpl-3.0 | 1,125 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import admin_password
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
def fake_get(self, context, id):
return {'uuid': id}
def fake_get_non_existed(self, context, id):
raise exception.InstanceNotFound(instance_id=id)
def fake_set_admin_password(self, context, instance, password=None):
pass
def fake_set_admin_password_failed(self, context, instance, password=None):
raise exception.InstancePasswordSetFailed(instance=instance, reason='')
def fake_set_admin_password_non_implement(self, context, instance,
password=None):
raise NotImplementedError()
class AdminPasswordTest(test.TestCase):
def setUp(self):
super(AdminPasswordTest, self).setUp()
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password)
self.stubs.Set(compute_api.API, 'get', fake_get)
self.app = fakes.wsgi_app_v3(init_only=('servers',
admin_password.ALIAS))
def _make_request(self, url, body):
req = webob.Request.blank(url)
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
res = req.get_response(self.app)
return res
def test_change_password(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 204)
def test_change_password_with_non_implement(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_non_implement)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 501)
def test_change_password_with_non_existed_instance(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'get', fake_get_non_existed)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 404)
def test_change_password_failed(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_failed)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 409)
def test_change_password_with_bad_request(self):
url = '/v3/servers/1/action'
body = {'change_password': {}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 400)
class AdminPasswordXMLTest(test.TestCase):
def test_change_password_deserializer(self):
deserializer = admin_password.ChangePasswordDeserializer()
request = '<change_password admin_password="1"></change_password>'
expected = {'body': {'change_password': {'admin_password': '1'}}}
res = deserializer.default(request)
self.assertEqual(res, expected)
def test_change_password_deserializer_without_admin_password(self):
deserializer = admin_password.ChangePasswordDeserializer()
request = '<change_password></change_password>'
expected = {'body': {'change_password': None}}
res = deserializer.default(request)
self.assertEqual(res, expected)
| plumgrid/plumgrid-nova | nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py | Python | apache-2.0 | 4,400 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('icekit', '0005_remove_layout_key'),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file', models.FileField(upload_to=b'uploads/files/', verbose_name='File field')),
('title', models.CharField(max_length=255, blank=True)),
('is_active', models.BooleanField(default=True)),
('admin_notes', models.TextField(help_text='Internal notes for administrators only.', blank=True)),
('categories', models.ManyToManyField(related_name='file_file_related', to='icekit.MediaCategory', blank=True)),
],
options={
'abstract': False,
'db_table': 'file_file',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FileItem',
fields=[
('contentitem_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('file', models.ForeignKey(help_text='A file from the file library.', to='icekit_plugins_file.File')),
],
options={
'abstract': False,
'db_table': 'contentitem_file_fileitem',
'verbose_name': 'File',
'verbose_name_plural': 'Files',
},
bases=('fluent_contents.contentitem',),
),
]
| ic-labs/django-icekit | icekit/plugins/file/migrations/0001_initial.py | Python | mit | 1,804 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
::
Copyright 2010 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
"""
Module which parses an icegrid XML file for configuration settings.
see ticket:800
see ticket:2213 - Replacing Java Preferences API
"""
import os
import path
import time
import logging
# To avoid conflict with omero.sys
sys = __import__("sys")
import xml.dom.minidom
from xml.etree.ElementTree import XML, Element, SubElement, Comment
from xml.etree.ElementTree import tostring
from omero_ext import portalocker
import json
class Environment(object):
"""
Object to record all the various locations
that the active configuration can come from.
"""
def __init__(self, user_specified=None):
self.fallback = "default"
self.user_specified = user_specified
self.from_os_environ = os.environ.get("OMERO_CONFIG", None)
def is_non_default(self):
if self.user_specified is not None:
return self.user_specified
elif self.from_os_environ is not None:
return self.from_os_environ
return None
def set_by_user(self, value):
self.user_specified = value
def internal_value(self, config):
props = config.props_to_dict(config.internal())
return props.get(config.DEFAULT, self.fallback)
def for_save(self, config):
"""
In some cases the environment chosen
should not be persisted.
"""
if self.user_specified:
return self.user_specified
else:
return self.internal_value(config)
def for_default(self, config):
if self.user_specified:
return self.user_specified
elif self.from_os_environ:
return self.from_os_environ
else:
return self.internal_value(config)
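# For example (a sketch, not part of the original module): with
# OMERO_CONFIG=production set in the environment and no user-specified
# profile, Environment().for_default(config) returns "production"; with
# neither set, it falls back to the "omero.config.profile" value stored in
# the __ACTIVE__ block of the config file, or "default".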
class ConfigXml(object):
"""
dict-like wrapper around the config.xml file usually stored
in etc/grid. For a copy of the dict, use "as_map"
"""
KEY = "omero.config.version"
VERSION = "5.1.0"
INTERNAL = "__ACTIVE__"
DEFAULT = "omero.config.profile"
IGNORE = (KEY, DEFAULT)
def __init__(self, filename, env_config=None, exclusive=True,
read_only=False):
# Logs to the class name
self.logger = logging.getLogger(self.__class__.__name__)
self.XML = None # Parsed XML Element
self.filename = filename # Path to the file to be read and written
self.env_config = Environment(env_config) # Environment override
# Whether or not an exclusive lock should be acquired
self.exclusive = exclusive
# Further, if saving should even be allowed.
self.read_only = read_only
self.save_on_close = True
self.open_source()
if self.exclusive: # must be "a+"
try:
portalocker.lock(
self.lock, portalocker.LOCK_NB | portalocker.LOCK_EX)
except portalocker.LockException:
self.lock = None # Prevent deleting of the file
self.close()
raise
self.source.seek(0)
text = self.source.read()
self.source.close()
if text:
self.XML = XML(text)
try:
self.version_check()
self.toplinks_check()
except:
self.close()
raise
# Nothing defined, so create a new tree
if self.XML is None:
default = self.default()
self.XML = Element("icegrid")
properties = SubElement(self.XML, "properties", id=self.INTERNAL)
SubElement(properties, "property", name=self.DEFAULT,
value=default)
SubElement(properties, "property", name=self.KEY,
value=self.VERSION)
properties = SubElement(self.XML, "properties", id=default)
SubElement(properties, "property", name=self.KEY,
value=self.VERSION)
def open_source(self):
self.source = None
if not self.read_only:
try:
# Try to open the file for modification
# If this fails, then the file is readonly
self.source = open(self.filename, "a+") # Open file handle
self.lock = self._open_lock() # Open file handle for lock
except IOError:
self.logger.debug("open('%s', 'a+') failed" % self.filename)
# Before we're forced to open read-only, we need to check
# that no other configuration has been requested because
# it will not be possible to modify the __ACTIVE__ setting
# once it's read-only
val = self.env_config.is_non_default()
if val is not None:
raise Exception(
"Non-default OMERO_CONFIG on read-only: %s" % val)
if self.source is None:
self.lock = None
self.exclusive = False
self.save_on_close = False
# Open file handle read-only
self.source = open(self.filename, "r")
def _open_lock(self):
return open("%s.lock" % self.filename, "a+")
def _close_lock(self):
if self.lock is not None:
self.lock.close()
self.lock = None
try:
os.remove("%s.lock" % self.filename)
except:
# On windows a WindowsError 32 can happen (file opened by
# another process), ignoring
self.logger.error("Failed to removed lock file, ignoring",
exc_info=True)
pass
def version(self, id=None):
if id is None:
id = self.default()
properties = self.properties(id)
if properties is not None:
for x in properties.getchildren():
if x.get("name") == self.KEY:
return x.get("value")
def version_check(self):
for k, v in self.properties(None, True):
version = self.version(k)
if version == "4.2.0":
self.version_fix(v, version)
def toplinks_check(self):
for k, v in self.properties(None, True):
version = self.version(k)
if version == "4.2.1" and v is not None:
for x in v.getchildren():
if x.get("name") == "omero.web.ui.top_links":
val = x.get("value", "")
toplinks = json.loads(val)
defaultlinks = [
["Data", "webindex",
{"title":
"Browse Data via Projects, Tags etc"}],
["History", "history",
{"title": "History"}],
["Help", "http://help.openmicroscopy.org/",
{"target": "new", "title":
"Open OMERO user guide in a new tab"}]]
toplinks = defaultlinks + toplinks
val = json.dumps(toplinks)
x.set("value", val)
if x.get("name") == self.KEY:
x.set("value", self.VERSION)
def version_fix(self, props, version):
"""
Currently we are assuming that all blocks without a 4.2.0 version
are bogus. The configuration script when it generates an initial
config.xml will use prefs.class to parse the existing values and
immediately do the upgrade.
"""
if version == "4.2.0":
# http://trac.openmicroscopy.org.uk/ome/ticket/2613
# Remove any reference to the ${omero.dollar} workaround
# then map anything of the form: ${...} to @{...}
if props:
for x in props.getchildren():
if x.get("name", "").startswith("omero.ldap"):
orig = x.get("value", "")
val = orig.replace("${omero.dollar}", "")
val = val.replace("${", "@{")
x.set("value", val)
self.logger.info("Upgraded 4.2.0 property: %s => %s",
orig, val)
else:
raise Exception("Version mismatch: %s has %s" %
(props.get("id"), version))
def internal(self):
return self.properties(self.INTERNAL)
def properties(self, id=None, filter_internal=False):
if self.XML is None:
return None
props = self.XML.findall("./properties")
if id is None:
rv = list()
for x in props:
id = x.attrib["id"]
if filter_internal:
if id == self.INTERNAL:
continue
rv.append((id, x))
return rv
for p in props:
if "id" in p.attrib and p.attrib["id"] == id:
return p
def remove(self, id=None):
if id is None:
id = self.default()
properties = self.properties(id)
if properties is None:
raise KeyError("No such configuration: %s" % id)
self.XML.remove(properties)
def default(self, value=None):
if value:
self.env_config.set_by_user(value)
return self.env_config.for_default(self)
def dump(self):
prop_list = self.properties()
for id, p in prop_list:
props = self.props_to_dict(p)
print "# ===> %s <===" % id
print self.dict_to_text(props)
def save(self):
"""
Creates a fresh <icegrid> block (removing any unwanted
intra-element whitespace) and overwrites the file on disk.
"""
icegrid = Element("icegrid")
comment = Comment("\n".join([
"\n",
"\tThis file was generated at %s by the OmeroConfig system.",
"\tDo not edit directly but see bin/omero config for details.",
"\tThis file may be included into your IceGrid application.",
"\n"]) % time.ctime())
icegrid.append(comment)
# First step is to add a new self.INTERNAL block to it
# which has self.DEFAULT set to the current default,
# and then copies all the values from that profile.
default = self.env_config.for_save(self)
internal = SubElement(icegrid, "properties", id=self.INTERNAL)
SubElement(internal, "property", name=self.DEFAULT, value=default)
SubElement(internal, "property", name=self.KEY, value=self.VERSION)
to_copy = self.properties(default)
if to_copy is not None:
for x in to_copy.getchildren():
if x.get("name") != self.DEFAULT and x.get("name") != self.KEY:
SubElement(internal, "property", x.attrib)
else:
# Doesn't exist, create it
properties = SubElement(icegrid, "properties", id=default)
SubElement(properties, "property", name=self.KEY,
value=self.VERSION)
# Now we simply reproduce all the other blocks
prop_list = self.properties(None, True)
for k, p in prop_list:
self.clear_text(p)
icegrid.append(p)
# Now add a single extension point which will
# contain a parsed version of templates.xml
SubElement(icegrid, "include", file="generated.xml")
self.write_element(icegrid)
def write_element(self, icegrid):
temp_file = path.path(self.filename + ".temp")
try:
temp_file.write_text(tostring(icegrid, "utf-8"))
if sys.platform == "win32":
os.remove(self.filename)
temp_file.rename(self.filename)
try:
self._close_lock()
except:
self.logger.error("Failed to close lock", exc_info=1)
except Exception, e:
try:
temp_file.remove()
except:
self.logger.error("Failed to remove temp file")
raise e
def close(self):
try:
# If we didn't get an XML instance, then something has gone wrong
# and we should exit. Similarly, if save_on_close is False, then we
# couldn't open the file "a+"
if self.XML is not None and self.save_on_close:
self.save()
self.XML = None
finally:
try:
if self.source is not None:
self.source.close()
self.source = None
finally:
self._close_lock()
def props_to_dict(self, c):
if c is None:
return {}
rv = dict()
props = c.findall("./property")
for p in props:
if "name" in p.attrib:
rv[p.attrib["name"]] = p.attrib.get("value", "")
return rv
def dict_to_text(self, parsed=None):
if parsed is None:
return
rv = ""
for k, v in parsed.items():
rv += "%s=%s" % (k, v)
return rv
def element_to_xml(self, elem):
string = tostring(elem, 'utf-8')
return xml.dom.minidom.parseString(string).toprettyxml(" ", "\n",
None)
def clear_text(self, p):
"""
To prevent the accumulation of text outside of elements (including
whitespace) we walk the given element and remove tail from it and its
children.
"""
p.tail = ""
p.text = ""
for p2 in p.getchildren():
self.clear_text(p2)
#
# Map interface on the default properties element
#
def as_map(self):
return self.props_to_dict(self.properties(self.default()))
def keys(self):
return self.as_map().keys()
def __getitem__(self, key):
return self.props_to_dict(self.properties(self.default()))[key]
def __setitem__(self, key, value):
default = self.default()
props = self.properties(default)
if props is None:
props = SubElement(self.XML, "properties", {"id": default})
SubElement(props, "property", name=self.KEY, value=self.VERSION)
for x in props.findall("./property"):
if x.attrib["name"] == key:
x.attrib["value"] = value
return
SubElement(props, "property", {"name": key, "value": value})
def __delitem__(self, key):
default = self.default()
props = self.properties(default)
to_remove = []
for p in props.getchildren():
if p.get("name") == key:
to_remove.append(p)
for x in to_remove:
props.remove(x)
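# Example usage (a minimal sketch, not part of the original module; the
# file path and property name are hypothetical):
#
#   cfg = ConfigXml("etc/grid/config.xml")
#   try:
#       cfg["omero.data.dir"] = "/OMERO"   # set a property in the active profile
#       print cfg.as_map()                 # dict copy of the active profile
#   finally:
#       cfg.close()                        # saves (unless read-only) and unlocks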
| tp81/openmicroscopy | components/tools/OmeroPy/src/omero/config.py | Python | gpl-2.0 | 15,139 |
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Logical units dealing with nodes."""
import logging
import operator
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
import ganeti.rpc.node as rpc
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, ResultWithJobs
from ganeti.cmdlib.common import CheckParamsNotGlobal, \
MergeAndVerifyHvState, MergeAndVerifyDiskState, \
IsExclusiveStorageEnabledNode, CheckNodePVs, \
RedistributeAncillaryFiles, ExpandNodeUuidAndName, ShareAll, SupportsOob, \
CheckInstanceState, INSTANCE_DOWN, GetUpdatedParams, \
AdjustCandidatePool, CheckIAllocatorOrNode, LoadNodeEvacResult, \
GetWantedNodes, MapInstanceLvsToNodes, RunPostHook, \
FindFaultyInstanceDisks, CheckStorageTypeEnabled, CreateNewClientCert, \
AddNodeCertToCandidateCerts, RemoveNodeCertFromCandidateCerts, \
EnsureKvmdOnNodes
from ganeti.ssh import GetSshPortMap
def _DecideSelfPromotion(lu, exceptions=None):
"""Decide whether I should promote myself as a master candidate.
"""
cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
# the new node will increase mc_max by one, so:
mc_should = min(mc_should + 1, cp_size)
return mc_now < mc_should
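# For example (a sketch of the arithmetic above): with candidate_pool_size=5,
# 2 current master candidates and 3 that there should be, mc_should becomes
# min(3 + 1, 5) = 4, and 2 < 4 means the new node promotes itself.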
def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
"""Ensure that a node has the given secondary ip.
@type lu: L{LogicalUnit}
@param lu: the LU on behalf of which we make the check
@type node: L{objects.Node}
@param node: the node to check
@type secondary_ip: string
@param secondary_ip: the ip to check
@type prereq: boolean
@param prereq: whether to throw a prerequisite or an execute error
@raise errors.OpPrereqError: if the node doesn't have the ip,
and prereq=True
@raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
"""
# this can be called with a new node, which has no UUID yet, so perform the
# RPC call using its name
result = lu.rpc.call_node_has_ip_address(node.name, secondary_ip)
result.Raise("Failure checking secondary ip on node %s" % node.name,
prereq=prereq, ecode=errors.ECODE_ENVIRON)
if not result.payload:
msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
" please fix and re-run this command" % secondary_ip)
if prereq:
raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
else:
raise errors.OpExecError(msg)
class LUNodeAdd(LogicalUnit):
"""Logical unit for adding node to the cluster.
"""
HPATH = "node-add"
HTYPE = constants.HTYPE_NODE
_NFLAGS = ["master_capable", "vm_capable"]
def CheckArguments(self):
self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
# validate/normalize the node name
self.hostname = netutils.GetHostname(name=self.op.node_name,
family=self.primary_ip_family)
self.op.node_name = self.hostname.name
if self.op.readd and self.op.node_name == self.cfg.GetMasterNodeName():
raise errors.OpPrereqError("Cannot readd the master node",
errors.ECODE_STATE)
if self.op.readd and self.op.group:
raise errors.OpPrereqError("Cannot pass a node group when a node is"
" being readded", errors.ECODE_INVAL)
def BuildHooksEnv(self):
"""Build hooks env.
This will run on all nodes before, and on all nodes + the new node after.
"""
return {
"OP_TARGET": self.op.node_name,
"NODE_NAME": self.op.node_name,
"NODE_PIP": self.op.primary_ip,
"NODE_SIP": self.op.secondary_ip,
"MASTER_CAPABLE": str(self.op.master_capable),
"VM_CAPABLE": str(self.op.vm_capable),
}
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
hook_nodes = self.cfg.GetNodeList()
new_node_info = self.cfg.GetNodeInfoByName(self.op.node_name)
if new_node_info is not None:
# Exclude added node
hook_nodes = list(set(hook_nodes) - set([new_node_info.uuid]))
# add the new node as post hook node by name; it does not have a UUID yet
return (hook_nodes, hook_nodes)
def PreparePostHookNodes(self, post_hook_node_uuids):
return post_hook_node_uuids + [self.new_node.uuid]
def CheckPrereq(self):
"""Check prerequisites.
This checks:
- the new node is not already in the config
- it is resolvable
- its parameters (single/dual homed) matches the cluster
Any errors are signaled by raising errors.OpPrereqError.
"""
node_name = self.hostname.name
self.op.primary_ip = self.hostname.ip
if self.op.secondary_ip is None:
if self.primary_ip_family == netutils.IP6Address.family:
raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
" IPv4 address must be given as secondary",
errors.ECODE_INVAL)
self.op.secondary_ip = self.op.primary_ip
secondary_ip = self.op.secondary_ip
if not netutils.IP4Address.IsValid(secondary_ip):
raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
" address" % secondary_ip, errors.ECODE_INVAL)
existing_node_info = self.cfg.GetNodeInfoByName(node_name)
if not self.op.readd and existing_node_info is not None:
raise errors.OpPrereqError("Node %s is already in the configuration" %
node_name, errors.ECODE_EXISTS)
elif self.op.readd and existing_node_info is None:
raise errors.OpPrereqError("Node %s is not in the configuration" %
node_name, errors.ECODE_NOENT)
self.changed_primary_ip = False
for existing_node in self.cfg.GetAllNodesInfo().values():
if self.op.readd and node_name == existing_node.name:
if existing_node.secondary_ip != secondary_ip:
raise errors.OpPrereqError("Readded node doesn't have the same IP"
" address configuration as before",
errors.ECODE_INVAL)
if existing_node.primary_ip != self.op.primary_ip:
self.changed_primary_ip = True
continue
if (existing_node.primary_ip == self.op.primary_ip or
existing_node.secondary_ip == self.op.primary_ip or
existing_node.primary_ip == secondary_ip or
existing_node.secondary_ip == secondary_ip):
raise errors.OpPrereqError("New node ip address(es) conflict with"
" existing node %s" % existing_node.name,
errors.ECODE_NOTUNIQUE)
# After this 'if' block, None is no longer a valid value for the
# _capable op attributes
if self.op.readd:
assert existing_node_info is not None, \
"Can't retrieve locked node %s" % node_name
for attr in self._NFLAGS:
if getattr(self.op, attr) is None:
setattr(self.op, attr, getattr(existing_node_info, attr))
else:
for attr in self._NFLAGS:
if getattr(self.op, attr) is None:
setattr(self.op, attr, True)
if self.op.readd and not self.op.vm_capable:
pri, sec = self.cfg.GetNodeInstances(existing_node_info.uuid)
if pri or sec:
raise errors.OpPrereqError("Node %s being re-added with vm_capable"
" flag set to false, but it already holds"
" instances" % node_name,
errors.ECODE_STATE)
# check that the type of the node (single versus dual homed) is the
# same as for the master
myself = self.cfg.GetMasterNodeInfo()
master_singlehomed = myself.secondary_ip == myself.primary_ip
newbie_singlehomed = secondary_ip == self.op.primary_ip
if master_singlehomed != newbie_singlehomed:
if master_singlehomed:
raise errors.OpPrereqError("The master has no secondary ip but the"
" new node has one",
errors.ECODE_INVAL)
else:
raise errors.OpPrereqError("The master has a secondary ip but the"
" new node doesn't have one",
errors.ECODE_INVAL)
# checks reachability
if not netutils.TcpPing(self.op.primary_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("Node not reachable by ping",
errors.ECODE_ENVIRON)
if not newbie_singlehomed:
# check reachability from my secondary ip to newbie's secondary ip
if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
source=myself.secondary_ip):
raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
" based ping to node daemon port",
errors.ECODE_ENVIRON)
if self.op.readd:
exceptions = [existing_node_info.uuid]
else:
exceptions = []
if self.op.master_capable:
self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
else:
self.master_candidate = False
self.node_group = None
if self.op.readd:
self.new_node = existing_node_info
self.node_group = existing_node_info.group
else:
self.node_group = self.cfg.LookupNodeGroup(self.op.group)
self.new_node = objects.Node(name=node_name,
primary_ip=self.op.primary_ip,
secondary_ip=secondary_ip,
master_candidate=self.master_candidate,
offline=False, drained=False,
group=self.node_group, ndparams={})
if self.op.ndparams:
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
"node", "cluster or group")
if self.op.hv_state:
self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
if self.op.disk_state:
self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
# TODO: If we need to have multiple DnsOnlyRunner we probably should make
# it a property on the base class.
rpcrunner = rpc.DnsOnlyRunner()
result = rpcrunner.call_version([node_name])[node_name]
result.Raise("Can't get version information from node %s" % node_name,
prereq=True, ecode=errors.ECODE_ENVIRON)
if constants.PROTOCOL_VERSION == result.payload:
logging.info("Communication to node %s fine, sw version %s match",
node_name, result.payload)
else:
raise errors.OpPrereqError("Version mismatch master version %s,"
" node version %s" %
(constants.PROTOCOL_VERSION, result.payload),
errors.ECODE_ENVIRON)
vg_name = self.cfg.GetVGName()
if vg_name is not None:
vparams = {constants.NV_PVLIST: [vg_name]}
excl_stor = IsExclusiveStorageEnabledNode(self.cfg, self.new_node)
cname = self.cfg.GetClusterName()
result = rpcrunner.call_node_verify_light(
[node_name], vparams, cname,
self.cfg.GetClusterInfo().hvparams,
{node_name: self.node_group},
self.cfg.GetAllNodeGroupsInfoDict()
)[node_name]
(errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
if errmsgs:
raise errors.OpPrereqError("Checks on node PVs failed: %s" %
"; ".join(errmsgs), errors.ECODE_ENVIRON)
def _InitOpenVSwitch(self):
filled_ndparams = self.cfg.GetClusterInfo().FillND(
self.new_node, self.cfg.GetNodeGroup(self.new_node.group))
ovs = filled_ndparams.get(constants.ND_OVS, None)
ovs_name = filled_ndparams.get(constants.ND_OVS_NAME, None)
ovs_link = filled_ndparams.get(constants.ND_OVS_LINK, None)
if ovs:
if not ovs_link:
self.LogInfo("No physical interface for OpenvSwitch was given."
" OpenvSwitch will not have an outside connection. This"
" might not be what you want.")
result = self.rpc.call_node_configure_ovs(
self.new_node.name, ovs_name, ovs_link)
result.Raise("Failed to initialize OpenVSwitch on new node")
def _SshUpdate(self, new_node_uuid, new_node_name, is_master_candidate,
is_potential_master_candidate, rpcrunner, readd):
"""Update the SSH setup of all nodes after adding a new node.
@type readd: boolean
@param readd: whether or not this node is readded
"""
potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
master_node = self.cfg.GetMasterNode()
port_map = GetSshPortMap(potential_master_candidates, self.cfg)
if readd:
# clear previous keys
master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
remove_result = rpcrunner.call_node_ssh_key_remove(
[master_node],
new_node_uuid, new_node_name,
master_candidate_uuids,
potential_master_candidates,
port_map,
True, # from authorized keys
True, # from public keys
False, # clear authorized keys
True) # clear public keys
remove_result[master_node].Raise(
"Could not remove SSH keys of node %s before readding,"
" (UUID: %s)." % (new_node_name, new_node_uuid))
result = rpcrunner.call_node_ssh_key_add(
[master_node], new_node_uuid, new_node_name,
potential_master_candidates, port_map,
is_master_candidate, is_potential_master_candidate,
is_potential_master_candidate)
result[master_node].Raise("Could not update the node's SSH setup.")
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
"""
assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
"Not owning BGL"
# We are adding a new node, so we assume it's powered
self.new_node.powered = True
# for re-adds, reset the offline/drained/master-candidate flags;
# we need to reset here, otherwise offline would prevent RPC calls
# later in the procedure; this also means that if the re-add
# fails, we are left with a non-offlined, broken node
if self.op.readd:
self.new_node.offline = False
self.new_node.drained = False
self.LogInfo("Readding a node, the offline/drained flags were reset")
# if we demote the node, we do cleanup later in the procedure
self.new_node.master_candidate = self.master_candidate
if self.changed_primary_ip:
self.new_node.primary_ip = self.op.primary_ip
# copy the master/vm_capable flags
for attr in self._NFLAGS:
setattr(self.new_node, attr, getattr(self.op, attr))
# notify the user about any possible mc promotion
if self.new_node.master_candidate:
self.LogInfo("Node will be a master candidate")
if self.op.ndparams:
self.new_node.ndparams = self.op.ndparams
else:
self.new_node.ndparams = {}
if self.op.hv_state:
self.new_node.hv_state_static = self.new_hv_state
if self.op.disk_state:
self.new_node.disk_state_static = self.new_disk_state
# Add node to our /etc/hosts, and add key to known_hosts
if self.cfg.GetClusterInfo().modify_etc_hosts:
master_node = self.cfg.GetMasterNode()
result = self.rpc.call_etc_hosts_modify(
master_node, constants.ETC_HOSTS_ADD, self.hostname.name,
self.hostname.ip)
result.Raise("Can't update hosts file with new host data")
if self.new_node.secondary_ip != self.new_node.primary_ip:
_CheckNodeHasSecondaryIP(self, self.new_node, self.new_node.secondary_ip,
False)
node_verifier_uuids = [self.cfg.GetMasterNode()]
node_verify_param = {
constants.NV_NODELIST: ([self.new_node.name], {}, []),
# TODO: do a node-net-test as well?
}
result = self.rpc.call_node_verify(
node_verifier_uuids, node_verify_param,
self.cfg.GetClusterName(),
self.cfg.GetClusterInfo().hvparams,
{self.new_node.name: self.cfg.LookupNodeGroup(self.node_group)},
self.cfg.GetAllNodeGroupsInfoDict()
)
for verifier in node_verifier_uuids:
result[verifier].Raise("Cannot communicate with node %s" % verifier)
nl_payload = result[verifier].payload[constants.NV_NODELIST]
if nl_payload:
for failed in nl_payload:
feedback_fn("ssh/hostname verification failed"
" (checking from %s): %s" %
(verifier, nl_payload[failed]))
raise errors.OpExecError("ssh/hostname verification failed")
self._InitOpenVSwitch()
if self.op.readd:
self.context.ReaddNode(self.new_node)
RedistributeAncillaryFiles(self)
# make sure we redistribute the config
self.cfg.Update(self.new_node, feedback_fn)
# and make sure the new node will not have old files around
if not self.new_node.master_candidate:
result = self.rpc.call_node_demote_from_mc(self.new_node.uuid)
result.Warn("Node failed to demote itself from master candidate status",
self.LogWarning)
else:
self.context.AddNode(self.cfg, self.new_node, self.proc.GetECId())
RedistributeAncillaryFiles(self)
# We create a new certificate even if the node is readded
digest = CreateNewClientCert(self, self.new_node.uuid)
if self.new_node.master_candidate:
self.cfg.AddNodeToCandidateCerts(self.new_node.uuid, digest)
else:
self.cfg.RemoveNodeFromCandidateCerts(self.new_node.uuid, warn_fn=None)
EnsureKvmdOnNodes(self, feedback_fn, nodes=[self.new_node.uuid])
# Update SSH setup of all nodes
if self.op.node_setup:
# FIXME: so far, all nodes are considered potential master candidates
self._SshUpdate(self.new_node.uuid, self.new_node.name,
self.new_node.master_candidate, True,
self.rpc, self.op.readd)
class LUNodeSetParams(LogicalUnit):
"""Modifies the parameters of a node.
@cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
to the node role (as _ROLE_*)
@cvar _R2F: a dictionary from node role to tuples of flags
@cvar _FLAGS: a list of attribute names corresponding to the flags
"""
HPATH = "node-modify"
HTYPE = constants.HTYPE_NODE
REQ_BGL = False
(_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
_F2R = {
(True, False, False): _ROLE_CANDIDATE,
(False, True, False): _ROLE_DRAINED,
(False, False, True): _ROLE_OFFLINE,
(False, False, False): _ROLE_REGULAR,
}
_R2F = dict((v, k) for k, v in _F2R.items())
_FLAGS = ["master_candidate", "drained", "offline"]
def CheckArguments(self):
(self.op.node_uuid, self.op.node_name) = \
ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
self.op.master_capable, self.op.vm_capable,
self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
self.op.disk_state]
if all_mods.count(None) == len(all_mods):
raise errors.OpPrereqError("Please pass at least one modification",
errors.ECODE_INVAL)
if all_mods.count(True) > 1:
raise errors.OpPrereqError("Can't set the node into more than one"
" state at the same time",
errors.ECODE_INVAL)
# Boolean value that tells us whether we might be demoting from MC
self.might_demote = (self.op.master_candidate is False or
self.op.offline is True or
self.op.drained is True or
self.op.master_capable is False)
if self.op.secondary_ip:
if not netutils.IP4Address.IsValid(self.op.secondary_ip):
raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
" address" % self.op.secondary_ip,
errors.ECODE_INVAL)
self.lock_all = self.op.auto_promote and self.might_demote
self.lock_instances = self.op.secondary_ip is not None
def _InstanceFilter(self, instance):
"""Filter for getting affected instances.
"""
disks = self.cfg.GetInstanceDisks(instance.uuid)
any_mirrored = utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR)
return (any_mirrored and
self.op.node_uuid in self.cfg.GetInstanceNodes(instance.uuid))
def ExpandNames(self):
if self.lock_all:
self.needed_locks = {
locking.LEVEL_NODE: locking.ALL_SET,
}
else:
self.needed_locks = {
locking.LEVEL_NODE: self.op.node_uuid,
}
# Since modifying a node can have severe effects on currently running
# operations the resource lock is at least acquired in shared mode
self.needed_locks[locking.LEVEL_NODE_RES] = \
self.needed_locks[locking.LEVEL_NODE]
# Get all locks except nodes in shared mode; they are not used for anything
# but read-only access
self.share_locks = ShareAll()
self.share_locks[locking.LEVEL_NODE] = 0
self.share_locks[locking.LEVEL_NODE_RES] = 0
if self.lock_instances:
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetInstanceNames(
self.cfg.GetInstancesInfoByFilter(self._InstanceFilter).keys())
def BuildHooksEnv(self):
"""Build hooks env.
This runs on the master node.
"""
return {
"OP_TARGET": self.op.node_name,
"MASTER_CANDIDATE": str(self.op.master_candidate),
"OFFLINE": str(self.op.offline),
"DRAINED": str(self.op.drained),
"MASTER_CAPABLE": str(self.op.master_capable),
"VM_CAPABLE": str(self.op.vm_capable),
}
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode(), self.op.node_uuid]
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This only checks the instance list against the existing names.
"""
node = self.cfg.GetNodeInfo(self.op.node_uuid)
if self.lock_instances:
affected_instances = \
self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
# Verify instance locks
owned_instance_names = self.owned_locks(locking.LEVEL_INSTANCE)
wanted_instance_names = frozenset([inst.name for inst in
affected_instances.values()])
if wanted_instance_names - owned_instance_names:
raise errors.OpPrereqError("Instances affected by changing node %s's"
" secondary IP address have changed since"
" locks were acquired, wanted '%s', have"
" '%s'; retry the operation" %
(node.name,
utils.CommaJoin(wanted_instance_names),
utils.CommaJoin(owned_instance_names)),
errors.ECODE_STATE)
else:
affected_instances = None
if (self.op.master_candidate is not None or
self.op.drained is not None or
self.op.offline is not None):
# we can't change the master's node flags
if node.uuid == self.cfg.GetMasterNode():
raise errors.OpPrereqError("The master role can be changed"
" only via master-failover",
errors.ECODE_INVAL)
if self.op.master_candidate and not node.master_capable:
raise errors.OpPrereqError("Node %s is not master capable, cannot make"
" it a master candidate" % node.name,
errors.ECODE_STATE)
if self.op.vm_capable is False:
(ipri, isec) = self.cfg.GetNodeInstances(node.uuid)
if ipri or isec:
raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
" the vm_capable flag" % node.name,
errors.ECODE_STATE)
if node.master_candidate and self.might_demote and not self.lock_all:
assert not self.op.auto_promote, "auto_promote set but lock_all not"
# check if after removing the current node, we're missing master
# candidates
(mc_remaining, mc_should, _) = \
self.cfg.GetMasterCandidateStats(exceptions=[node.uuid])
if mc_remaining < mc_should:
raise errors.OpPrereqError("Not enough master candidates, please"
" pass auto promote option to allow"
" promotion (--auto-promote or RAPI"
" auto_promote=True)", errors.ECODE_STATE)
self.old_flags = old_flags = (node.master_candidate,
node.drained, node.offline)
assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
self.old_role = old_role = self._F2R[old_flags]
# Check for ineffective changes
for attr in self._FLAGS:
if getattr(self.op, attr) is False and getattr(node, attr) is False:
self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
setattr(self.op, attr, None)
# Past this point, any flag change to False means a transition
# away from the respective state, as only real changes are kept
# TODO: We might query the real power state if it supports OOB
if SupportsOob(self.cfg, node):
if self.op.offline is False and not (node.powered or
self.op.powered is True):
raise errors.OpPrereqError(("Node %s needs to be turned on before its"
" offline status can be reset") %
self.op.node_name, errors.ECODE_STATE)
elif self.op.powered is not None:
raise errors.OpPrereqError(("Unable to change powered state for node %s"
" as it does not support out-of-band"
" handling") % self.op.node_name,
errors.ECODE_STATE)
# If we're being deofflined/drained, we'll MC ourself if needed
if (self.op.drained is False or self.op.offline is False or
(self.op.master_capable and not node.master_capable)):
if _DecideSelfPromotion(self):
self.op.master_candidate = True
self.LogInfo("Auto-promoting node to master candidate")
# If we're no longer master capable, we'll demote ourselves from MC
if self.op.master_capable is False and node.master_candidate:
self.LogInfo("Demoting from master candidate")
self.op.master_candidate = False
# Compute new role
assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
if self.op.master_candidate:
new_role = self._ROLE_CANDIDATE
elif self.op.drained:
new_role = self._ROLE_DRAINED
elif self.op.offline:
new_role = self._ROLE_OFFLINE
elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
# False is still in new flags, which means we're un-setting (the
# only) True flag
new_role = self._ROLE_REGULAR
else: # no new flags, nothing, keep old role
new_role = old_role
self.new_role = new_role
if old_role == self._ROLE_OFFLINE and new_role != old_role:
# Trying to transition out of offline status
result = self.rpc.call_version([node.uuid])[node.uuid]
if result.fail_msg:
raise errors.OpPrereqError("Node %s is being de-offlined but fails"
" to report its version: %s" %
(node.name, result.fail_msg),
errors.ECODE_STATE)
else:
self.LogWarning("Transitioning node from offline to online state"
" without using re-add. Please make sure the node"
" is healthy!")
# When changing the secondary ip, verify if this is a single-homed to
# multi-homed transition or vice versa, and apply the relevant
# restrictions.
if self.op.secondary_ip:
# Ok even without locking, because this can't be changed by any LU
master = self.cfg.GetMasterNodeInfo()
master_singlehomed = master.secondary_ip == master.primary_ip
if master_singlehomed and self.op.secondary_ip != node.primary_ip:
if self.op.force and node.uuid == master.uuid:
self.LogWarning("Transitioning from single-homed to multi-homed"
" cluster; all nodes will require a secondary IP"
" address")
else:
raise errors.OpPrereqError("Changing the secondary ip on a"
" single-homed cluster requires the"
" --force option to be passed, and the"
" target node to be the master",
errors.ECODE_INVAL)
elif not master_singlehomed and self.op.secondary_ip == node.primary_ip:
if self.op.force and node.uuid == master.uuid:
self.LogWarning("Transitioning from multi-homed to single-homed"
" cluster; secondary IP addresses will have to be"
" removed")
else:
raise errors.OpPrereqError("Cannot set the secondary IP to be the"
" same as the primary IP on a multi-homed"
" cluster, unless the --force option is"
" passed, and the target node is the"
" master", errors.ECODE_INVAL)
assert not (set([inst.name for inst in affected_instances.values()]) -
self.owned_locks(locking.LEVEL_INSTANCE))
if node.offline:
if affected_instances:
msg = ("Cannot change secondary IP address: offline node has"
" instances (%s) configured to use it" %
utils.CommaJoin(
[inst.name for inst in affected_instances.values()]))
raise errors.OpPrereqError(msg, errors.ECODE_STATE)
else:
# On online nodes, check that no instances are running, and that
# the node has the new ip and we can reach it.
for instance in affected_instances.values():
CheckInstanceState(self, instance, INSTANCE_DOWN,
msg="cannot change secondary ip")
_CheckNodeHasSecondaryIP(self, node, self.op.secondary_ip, True)
if master.uuid != node.uuid:
# check reachability from master secondary ip to new secondary ip
if not netutils.TcpPing(self.op.secondary_ip,
constants.DEFAULT_NODED_PORT,
source=master.secondary_ip):
raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
" based ping to node daemon port",
errors.ECODE_ENVIRON)
if self.op.ndparams:
new_ndparams = GetUpdatedParams(node.ndparams, self.op.ndparams)
utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
"node", "cluster or group")
self.new_ndparams = new_ndparams
if self.op.hv_state:
self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
node.hv_state_static)
if self.op.disk_state:
self.new_disk_state = \
MergeAndVerifyDiskState(self.op.disk_state, node.disk_state_static)
def Exec(self, feedback_fn):
"""Modifies a node.
"""
node = self.cfg.GetNodeInfo(self.op.node_uuid)
result = []
if self.op.ndparams:
node.ndparams = self.new_ndparams
if self.op.powered is not None:
node.powered = self.op.powered
if self.op.hv_state:
node.hv_state_static = self.new_hv_state
if self.op.disk_state:
node.disk_state_static = self.new_disk_state
for attr in ["master_capable", "vm_capable"]:
val = getattr(self.op, attr)
if val is not None:
setattr(node, attr, val)
result.append((attr, str(val)))
if self.op.secondary_ip:
node.secondary_ip = self.op.secondary_ip
result.append(("secondary_ip", self.op.secondary_ip))
# this will trigger configuration file update, if needed
self.cfg.Update(node, feedback_fn)
if self.new_role != self.old_role:
new_flags = self._R2F[self.new_role]
for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
if of != nf:
result.append((desc, str(nf)))
(node.master_candidate, node.drained, node.offline) = new_flags
self.cfg.Update(node, feedback_fn)
# Tell the node to demote itself, if no longer MC and not offline.
# This must be done only after the configuration is updated so that
# it's ensured the node won't receive any further configuration updates.
if self.old_role == self._ROLE_CANDIDATE and \
self.new_role != self._ROLE_OFFLINE:
msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
if msg:
self.LogWarning("Node failed to demote itself: %s", msg)
# we locked all nodes, we adjust the CP before updating this node
if self.lock_all:
AdjustCandidatePool(self, [node.uuid])
# if node gets promoted, grant RPC privileges
if self.new_role == self._ROLE_CANDIDATE:
AddNodeCertToCandidateCerts(self, self.cfg, node.uuid)
# if node is demoted, revoke RPC privileges
if self.old_role == self._ROLE_CANDIDATE:
RemoveNodeCertFromCandidateCerts(self.cfg, node.uuid)
EnsureKvmdOnNodes(self, feedback_fn, nodes=[node.uuid])
# this will trigger job queue propagation or cleanup if the mc
# flag changed
if [self.old_role, self.new_role].count(self._ROLE_CANDIDATE) == 1:
self.context.ReaddNode(node)
if self.cfg.GetClusterInfo().modify_ssh_setup:
potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
ssh_port_map = GetSshPortMap(potential_master_candidates, self.cfg)
master_node = self.cfg.GetMasterNode()
if self.old_role == self._ROLE_CANDIDATE:
master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
ssh_result = self.rpc.call_node_ssh_key_remove(
[master_node],
node.uuid, node.name,
master_candidate_uuids, potential_master_candidates, ssh_port_map,
True, # remove node's key from all nodes' authorized_keys file
False, # currently, all nodes are potential master candidates
False, # do not clear node's 'authorized_keys'
False) # do not clear node's 'ganeti_pub_keys'
ssh_result[master_node].Raise(
"Could not adjust the SSH setup after demoting node '%s'"
" (UUID: %s)." % (node.name, node.uuid))
if self.new_role == self._ROLE_CANDIDATE:
ssh_result = self.rpc.call_node_ssh_key_add(
[master_node], node.uuid, node.name,
potential_master_candidates, ssh_port_map,
True, # add node's key to all node's 'authorized_keys'
True, # all nodes are potential master candidates
False) # do not update the node's public keys
ssh_result[master_node].Raise(
"Could not update the SSH setup of node '%s' after promotion"
" (UUID: %s)." % (node.name, node.uuid))
return result
class LUNodePowercycle(NoHooksLU):
"""Powercycles a node.
"""
REQ_BGL = False
def CheckArguments(self):
(self.op.node_uuid, self.op.node_name) = \
ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
if self.op.node_uuid == self.cfg.GetMasterNode() and not self.op.force:
raise errors.OpPrereqError("The node is the master and the force"
" parameter was not set",
errors.ECODE_INVAL)
def ExpandNames(self):
"""Locking for PowercycleNode.
This is a last-resort option and shouldn't block on other
jobs. Therefore, we grab no locks.
"""
self.needed_locks = {}
def Exec(self, feedback_fn):
"""Reboots a node.
"""
default_hypervisor = self.cfg.GetHypervisorType()
hvparams = self.cfg.GetClusterInfo().hvparams[default_hypervisor]
result = self.rpc.call_node_powercycle(self.op.node_uuid,
default_hypervisor,
hvparams)
result.Raise("Failed to schedule the reboot")
return result.payload
def _GetNodeInstancesInner(cfg, fn):
return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
def _GetNodePrimaryInstances(cfg, node_uuid):
"""Returns primary instances on a node.
"""
return _GetNodeInstancesInner(cfg,
lambda inst: node_uuid == inst.primary_node)
def _GetNodeSecondaryInstances(cfg, node_uuid):
"""Returns secondary instances on a node.
"""
return _GetNodeInstancesInner(cfg,
lambda inst: node_uuid in
cfg.GetInstanceSecondaryNodes(inst.uuid))
def _GetNodeInstances(cfg, node_uuid):
"""Returns a list of all primary and secondary instances on a node.
"""
return _GetNodeInstancesInner(cfg,
lambda inst: node_uuid in
cfg.GetInstanceNodes(inst.uuid))
class LUNodeEvacuate(NoHooksLU):
"""Evacuates instances off a list of nodes.
"""
REQ_BGL = False
def CheckArguments(self):
CheckIAllocatorOrNode(self, "iallocator", "remote_node")
def ExpandNames(self):
(self.op.node_uuid, self.op.node_name) = \
ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
if self.op.remote_node is not None:
(self.op.remote_node_uuid, self.op.remote_node) = \
ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
self.op.remote_node)
assert self.op.remote_node
if self.op.node_uuid == self.op.remote_node_uuid:
raise errors.OpPrereqError("Can not use evacuated node as a new"
" secondary node", errors.ECODE_INVAL)
if self.op.mode != constants.NODE_EVAC_SEC:
raise errors.OpPrereqError("Without the use of an iallocator only"
" secondary instances can be evacuated",
errors.ECODE_INVAL)
# Declare locks
self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [],
locking.LEVEL_NODE: [],
}
# Determine nodes (via group) optimistically, needs verification once locks
# have been acquired
self.lock_nodes = self._DetermineNodes()
def _DetermineNodes(self):
"""Gets the list of node UUIDs to operate on.
"""
if self.op.remote_node is None:
# Iallocator will choose any node(s) in the same group
group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_uuid])
else:
group_nodes = frozenset([self.op.remote_node_uuid])
# Determine nodes to be locked
return set([self.op.node_uuid]) | group_nodes
def _DetermineInstances(self):
"""Builds list of instances to operate on.
"""
assert self.op.mode in constants.NODE_EVAC_MODES
if self.op.mode == constants.NODE_EVAC_PRI:
# Primary instances only
inst_fn = _GetNodePrimaryInstances
assert self.op.remote_node is None, \
"Evacuating primary instances requires iallocator"
elif self.op.mode == constants.NODE_EVAC_SEC:
# Secondary instances only
inst_fn = _GetNodeSecondaryInstances
else:
# All instances
assert self.op.mode == constants.NODE_EVAC_ALL
inst_fn = _GetNodeInstances
# TODO: In 2.6, change the iallocator interface to take an evacuation mode
# per instance
raise errors.OpPrereqError("Due to an issue with the iallocator"
" interface it is not possible to evacuate"
" all instances at once; specify explicitly"
" whether to evacuate primary or secondary"
" instances",
errors.ECODE_INVAL)
return inst_fn(self.cfg, self.op.node_uuid)
def DeclareLocks(self, level):
if level == locking.LEVEL_INSTANCE:
# Lock instances optimistically, needs verification once node and group
# locks have been acquired
self.needed_locks[locking.LEVEL_INSTANCE] = \
set(i.name for i in self._DetermineInstances())
elif level == locking.LEVEL_NODEGROUP:
# Lock node groups for all potential target nodes optimistically, needs
# verification once nodes have been acquired
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
elif level == locking.LEVEL_NODE:
self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
def CheckPrereq(self):
# Verify locks
owned_instance_names = self.owned_locks(locking.LEVEL_INSTANCE)
owned_nodes = self.owned_locks(locking.LEVEL_NODE)
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
need_nodes = self._DetermineNodes()
if not owned_nodes.issuperset(need_nodes):
raise errors.OpPrereqError("Nodes in same group as '%s' changed since"
" locks were acquired, current nodes are"
" are '%s', used to be '%s'; retry the"
" operation" %
(self.op.node_name,
utils.CommaJoin(need_nodes),
utils.CommaJoin(owned_nodes)),
errors.ECODE_STATE)
wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
if owned_groups != wanted_groups:
raise errors.OpExecError("Node groups changed since locks were acquired,"
" current groups are '%s', used to be '%s';"
" retry the operation" %
(utils.CommaJoin(wanted_groups),
utils.CommaJoin(owned_groups)))
# Determine affected instances
self.instances = self._DetermineInstances()
self.instance_names = [i.name for i in self.instances]
if set(self.instance_names) != owned_instance_names:
raise errors.OpExecError("Instances on node '%s' changed since locks"
" were acquired, current instances are '%s',"
" used to be '%s'; retry the operation" %
(self.op.node_name,
utils.CommaJoin(self.instance_names),
utils.CommaJoin(owned_instance_names)))
if self.instance_names:
self.LogInfo("Evacuating instances from node '%s': %s",
self.op.node_name,
utils.CommaJoin(utils.NiceSort(self.instance_names)))
else:
self.LogInfo("No instances to evacuate from node '%s'",
self.op.node_name)
if self.op.remote_node is not None:
for i in self.instances:
if i.primary_node == self.op.remote_node_uuid:
raise errors.OpPrereqError("Node %s is the primary node of"
" instance %s, cannot use it as"
" secondary" %
(self.op.remote_node, i.name),
errors.ECODE_INVAL)
def Exec(self, feedback_fn):
assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
if not self.instance_names:
# No instances to evacuate
jobs = []
elif self.op.iallocator is not None:
# TODO: Implement relocation to other group
req = iallocator.IAReqNodeEvac(evac_mode=self.op.mode,
instances=list(self.instance_names))
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
ial.Run(self.op.iallocator)
if not ial.success:
raise errors.OpPrereqError("Can't compute node evacuation using"
" iallocator '%s': %s" %
(self.op.iallocator, ial.info),
errors.ECODE_NORES)
jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
elif self.op.remote_node is not None:
assert self.op.mode == constants.NODE_EVAC_SEC
jobs = [
[opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
remote_node=self.op.remote_node,
disks=[],
mode=constants.REPLACE_DISK_CHG,
early_release=self.op.early_release)]
for instance_name in self.instance_names]
else:
raise errors.ProgrammerError("No iallocator or remote node")
return ResultWithJobs(jobs)
class LUNodeMigrate(LogicalUnit):
"""Migrate all instances from a node.
"""
HPATH = "node-migrate"
HTYPE = constants.HTYPE_NODE
REQ_BGL = False
def CheckArguments(self):
pass
def ExpandNames(self):
(self.op.node_uuid, self.op.node_name) = \
ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_NODE: [self.op.node_uuid],
}
def BuildHooksEnv(self):
"""Build hooks env.
This runs on the master, the primary and all the secondaries.
"""
return {
"NODE_NAME": self.op.node_name,
"ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
}
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()]
return (nl, nl)
def CheckPrereq(self):
pass
def Exec(self, feedback_fn):
# Prepare jobs for migration instances
jobs = [
[opcodes.OpInstanceMigrate(
instance_name=inst.name,
mode=self.op.mode,
live=self.op.live,
iallocator=self.op.iallocator,
target_node=self.op.target_node,
allow_runtime_changes=self.op.allow_runtime_changes,
ignore_ipolicy=self.op.ignore_ipolicy)]
for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_uuid)]
# TODO: Run iallocator in this opcode and pass correct placement options to
# OpInstanceMigrate. Since other jobs can modify the cluster between
# running the iallocator and the actual migration, a good consistency model
# will have to be found.
assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
frozenset([self.op.node_uuid]))
return ResultWithJobs(jobs)
def _GetStorageTypeArgs(cfg, storage_type):
"""Returns the arguments for a storage type.
"""
# Special case for file storage
if storage_type == constants.ST_FILE:
return [[cfg.GetFileStorageDir()]]
elif storage_type == constants.ST_SHARED_FILE:
return [[cfg.GetSharedFileStorageDir()]]
elif storage_type == constants.ST_GLUSTER:
return [[cfg.GetGlusterStorageDir()]]
else:
return []
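# Illustrative note (added; not part of the upstream module): only the
# file-based backends need an extra argument (their storage directory). For
# any other storage type, e.g. constants.ST_LVM_VG, this helper returns [],
# so the storage RPC receives no additional positional arguments.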
class LUNodeModifyStorage(NoHooksLU):
"""Logical unit for modifying a storage volume on a node.
"""
REQ_BGL = False
def CheckArguments(self):
(self.op.node_uuid, self.op.node_name) = \
ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
storage_type = self.op.storage_type
try:
modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
except KeyError:
raise errors.OpPrereqError("Storage units of type '%s' can not be"
" modified" % storage_type,
errors.ECODE_INVAL)
diff = set(self.op.changes.keys()) - modifiable
if diff:
raise errors.OpPrereqError("The following fields can not be modified for"
" storage units of type '%s': %r" %
(storage_type, list(diff)),
errors.ECODE_INVAL)
def CheckPrereq(self):
"""Check prerequisites.
"""
CheckStorageTypeEnabled(self.cfg.GetClusterInfo(), self.op.storage_type)
def ExpandNames(self):
self.needed_locks = {
locking.LEVEL_NODE: self.op.node_uuid,
}
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
result = self.rpc.call_storage_modify(self.op.node_uuid,
self.op.storage_type, st_args,
self.op.name, self.op.changes)
result.Raise("Failed to modify storage unit '%s' on %s" %
(self.op.name, self.op.node_name))
def _CheckOutputFields(fields, selected):
"""Checks whether all selected fields are valid according to fields.
@type fields: L{utils.FieldSet}
@param fields: fields set
@type selected: L{utils.FieldSet}
@param selected: fields set
"""
delta = fields.NonMatching(selected)
if delta:
raise errors.OpPrereqError("Unknown output fields selected: %s"
% ",".join(delta), errors.ECODE_INVAL)
class LUNodeQueryvols(NoHooksLU):
"""Logical unit for getting volumes on node(s).
"""
REQ_BGL = False
def CheckArguments(self):
_CheckOutputFields(utils.FieldSet(constants.VF_NODE, constants.VF_PHYS,
constants.VF_VG, constants.VF_NAME,
constants.VF_SIZE, constants.VF_INSTANCE),
self.op.output_fields)
def ExpandNames(self):
self.share_locks = ShareAll()
if self.op.nodes:
self.needed_locks = {
locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes)[0],
}
else:
self.needed_locks = {
locking.LEVEL_NODE: locking.ALL_SET,
}
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
node_uuids = self.owned_locks(locking.LEVEL_NODE)
volumes = self.rpc.call_node_volumes(node_uuids)
ilist = self.cfg.GetAllInstancesInfo()
vol2inst = MapInstanceLvsToNodes(self.cfg, ilist.values())
output = []
for node_uuid in node_uuids:
nresult = volumes[node_uuid]
if nresult.offline:
continue
msg = nresult.fail_msg
if msg:
self.LogWarning("Can't compute volume data on node %s: %s",
self.cfg.GetNodeName(node_uuid), msg)
continue
node_vols = sorted(nresult.payload,
key=operator.itemgetter(constants.VF_DEV))
for vol in node_vols:
node_output = []
for field in self.op.output_fields:
if field == constants.VF_NODE:
val = self.cfg.GetNodeName(node_uuid)
elif field == constants.VF_PHYS:
val = vol[constants.VF_DEV]
elif field == constants.VF_VG:
val = vol[constants.VF_VG]
elif field == constants.VF_NAME:
val = vol[constants.VF_NAME]
elif field == constants.VF_SIZE:
val = int(float(vol[constants.VF_SIZE]))
elif field == constants.VF_INSTANCE:
inst = vol2inst.get((node_uuid, vol[constants.VF_VG] + "/" +
vol[constants.VF_NAME]), None)
if inst is not None:
val = inst.name
else:
val = "-"
else:
raise errors.ParameterError(field)
node_output.append(str(val))
output.append(node_output)
return output
class LUNodeQueryStorage(NoHooksLU):
"""Logical unit for getting information on storage units on node(s).
"""
REQ_BGL = False
def CheckArguments(self):
_CheckOutputFields(utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
self.op.output_fields)
def ExpandNames(self):
self.share_locks = ShareAll()
if self.op.nodes:
self.needed_locks = {
locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes)[0],
}
else:
self.needed_locks = {
locking.LEVEL_NODE: locking.ALL_SET,
}
def _DetermineStorageType(self):
"""Determines the default storage type of the cluster.
"""
enabled_disk_templates = self.cfg.GetClusterInfo().enabled_disk_templates
default_storage_type = \
constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[enabled_disk_templates[0]]
return default_storage_type
def CheckPrereq(self):
"""Check prerequisites.
"""
if self.op.storage_type:
CheckStorageTypeEnabled(self.cfg.GetClusterInfo(), self.op.storage_type)
self.storage_type = self.op.storage_type
else:
self.storage_type = self._DetermineStorageType()
supported_storage_types = constants.STS_REPORT_NODE_STORAGE
if self.storage_type not in supported_storage_types:
raise errors.OpPrereqError(
"Storage reporting for storage type '%s' is not supported. Please"
" use the --storage-type option to specify one of the supported"
" storage types (%s) or set the default disk template to one that"
" supports storage reporting." %
(self.storage_type, utils.CommaJoin(supported_storage_types)))
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
"""
if self.op.storage_type:
self.storage_type = self.op.storage_type
else:
self.storage_type = self._DetermineStorageType()
self.node_uuids = self.owned_locks(locking.LEVEL_NODE)
# Always get name to sort by
if constants.SF_NAME in self.op.output_fields:
fields = self.op.output_fields[:]
else:
fields = [constants.SF_NAME] + self.op.output_fields
# Never ask for node or type as it's only known to the LU
for extra in [constants.SF_NODE, constants.SF_TYPE]:
while extra in fields:
fields.remove(extra)
field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
name_idx = field_idx[constants.SF_NAME]
st_args = _GetStorageTypeArgs(self.cfg, self.storage_type)
data = self.rpc.call_storage_list(self.node_uuids,
self.storage_type, st_args,
self.op.name, fields)
result = []
for node_uuid in utils.NiceSort(self.node_uuids):
node_name = self.cfg.GetNodeName(node_uuid)
nresult = data[node_uuid]
if nresult.offline:
continue
msg = nresult.fail_msg
if msg:
self.LogWarning("Can't get storage data from node %s: %s",
node_name, msg)
continue
rows = dict([(row[name_idx], row) for row in nresult.payload])
for name in utils.NiceSort(rows.keys()):
row = rows[name]
out = []
for field in self.op.output_fields:
if field == constants.SF_NODE:
val = node_name
elif field == constants.SF_TYPE:
val = self.storage_type
elif field in field_idx:
val = row[field_idx[field]]
else:
raise errors.ParameterError(field)
out.append(val)
result.append(out)
return result
class LUNodeRemove(LogicalUnit):
"""Logical unit for removing a node.
"""
HPATH = "node-remove"
HTYPE = constants.HTYPE_NODE
def BuildHooksEnv(self):
"""Build hooks env.
"""
return {
"OP_TARGET": self.op.node_name,
"NODE_NAME": self.op.node_name,
}
def BuildHooksNodes(self):
"""Build hooks nodes.
This doesn't run on the target node in the pre phase as a failed
node would then be impossible to remove.
"""
all_nodes = self.cfg.GetNodeList()
try:
all_nodes.remove(self.op.node_uuid)
except ValueError:
pass
return (all_nodes, all_nodes)
def CheckPrereq(self):
"""Check prerequisites.
This checks:
- the node exists in the configuration
- it does not have primary or secondary instances
- it's not the master
Any errors are signaled by raising errors.OpPrereqError.
"""
(self.op.node_uuid, self.op.node_name) = \
ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
node = self.cfg.GetNodeInfo(self.op.node_uuid)
assert node is not None
masternode = self.cfg.GetMasterNode()
if node.uuid == masternode:
raise errors.OpPrereqError("Node is the master node, failover to another"
" node is required", errors.ECODE_INVAL)
for _, instance in self.cfg.GetAllInstancesInfo().items():
if node.uuid in self.cfg.GetInstanceNodes(instance.uuid):
raise errors.OpPrereqError("Instance %s is still running on the node,"
" please remove first" % instance.name,
errors.ECODE_INVAL)
self.op.node_name = node.name
self.node = node
def Exec(self, feedback_fn):
"""Removes the node from the cluster.
"""
logging.info("Stopping the node daemon and removing configs from node %s",
self.node.name)
modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
"Not owning BGL"
if modify_ssh_setup:
# retrieve the list of potential master candidates before the node is
# removed
potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
potential_master_candidate = \
self.op.node_name in potential_master_candidates
ssh_port_map = GetSshPortMap(potential_master_candidates, self.cfg)
master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
master_node = self.cfg.GetMasterNode()
result = self.rpc.call_node_ssh_key_remove(
[master_node],
self.node.uuid, self.op.node_name,
master_candidate_uuids, potential_master_candidates, ssh_port_map,
self.node.master_candidate, # from_authorized_keys
potential_master_candidate, # from_public_keys
True, # clear node's 'authorized_keys'
True) # clear node's 'ganeti_pub_keys'
result[master_node].Raise(
"Could not remove the SSH key of node '%s' (UUID: %s)." %
(self.op.node_name, self.node.uuid))
# Promote nodes to master candidate as needed
AdjustCandidatePool(self, [self.node.uuid])
self.context.RemoveNode(self.cfg, self.node)
# Run post hooks on the node before it's removed
RunPostHook(self, self.node.name)
# we have to call this by name rather than by UUID, as the node is no longer
# in the config
result = self.rpc.call_node_leave_cluster(self.node.name, modify_ssh_setup)
msg = result.fail_msg
if msg:
self.LogWarning("Errors encountered on the remote node while leaving"
" the cluster: %s", msg)
cluster = self.cfg.GetClusterInfo()
# Remove node from candidate certificate list
if self.node.master_candidate:
self.cfg.RemoveNodeFromCandidateCerts(self.node.uuid)
# Remove node from our /etc/hosts
if cluster.modify_etc_hosts:
master_node_uuid = self.cfg.GetMasterNode()
result = self.rpc.call_etc_hosts_modify(master_node_uuid,
constants.ETC_HOSTS_REMOVE,
self.node.name, None)
result.Raise("Can't update hosts file with new host data")
RedistributeAncillaryFiles(self)
class LURepairNodeStorage(NoHooksLU):
"""Repairs the volume group on a node.
"""
REQ_BGL = False
def CheckArguments(self):
(self.op.node_uuid, self.op.node_name) = \
ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
storage_type = self.op.storage_type
if (constants.SO_FIX_CONSISTENCY not in
constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
raise errors.OpPrereqError("Storage units of type '%s' can not be"
" repaired" % storage_type,
errors.ECODE_INVAL)
def ExpandNames(self):
self.needed_locks = {
locking.LEVEL_NODE: [self.op.node_uuid],
}
def _CheckFaultyDisks(self, instance, node_uuid):
"""Ensure faulty disks abort the opcode or at least warn."""
try:
if FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
node_uuid, True):
raise errors.OpPrereqError("Instance '%s' has faulty disks on"
" node '%s'" %
(instance.name,
self.cfg.GetNodeName(node_uuid)),
errors.ECODE_STATE)
except errors.OpPrereqError as err:
if self.op.ignore_consistency:
self.LogWarning(str(err.args[0]))
else:
raise
def CheckPrereq(self):
"""Check prerequisites.
"""
CheckStorageTypeEnabled(self.cfg.GetClusterInfo(), self.op.storage_type)
# Check whether any instance on this node has faulty disks
for inst in _GetNodeInstances(self.cfg, self.op.node_uuid):
if not inst.disks_active:
continue
check_nodes = set(self.cfg.GetInstanceNodes(inst.uuid))
check_nodes.discard(self.op.node_uuid)
for inst_node_uuid in check_nodes:
self._CheckFaultyDisks(inst, inst_node_uuid)
def Exec(self, feedback_fn):
feedback_fn("Repairing storage unit '%s' on %s ..." %
(self.op.name, self.op.node_name))
st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
result = self.rpc.call_storage_execute(self.op.node_uuid,
self.op.storage_type, st_args,
self.op.name,
constants.SO_FIX_CONSISTENCY)
result.Raise("Failed to repair storage unit '%s' on %s" %
(self.op.name, self.op.node_name))
| apyrgio/ganeti | lib/cmdlib/node.py | Python | bsd-2-clause | 64,603 |
import hashlib
import os
import dialog
import requests
# Compute the hash of the target video file (md5 of its first and last 64 KiB)
def get_hash(name):
readsize = 1024*64
with open(name,'rb') as f:
size = os.path.getsize(name)
data = f.read(readsize)
f.seek(-readsize,os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest()
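# Note added for clarity (not in the original script): SubDB identifies a
# video by the md5 of its first and last 64 KiB, which is exactly what
# get_hash() computes above. A minimal sketch of calling it:
#
#   movie_hash = get_hash('/path/to/movie.mkv')  # hypothetical path
#   print(movie_hash)                            # 32-character hex digest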
#use of subDB api: fetch an English subtitle and save it next to the video
def api(file_add):
file_hash = get_hash(file_add)
user_agent = {'User-agent':'SubDB/1.0 (subX/0.1; http://github.com/irresolute/subX)'}
param = {'action':'download','hash':file_hash,'language':'en'}
try:
r = requests.get("http://api.thesubdb.com/",headers=user_agent,params=param)
except requests.RequestException:
dialog.showmsg('Could not reach SubDB, check connectivity and try again','subX')
return
if r.status_code != 200:
dialog.showmsg('Subtitle not found, check connectivity and try again','subX')
return
fName, fExt = os.path.splitext(file_add)
fName = fName + '.srt'
with open(fName,'wb') as f:
f.write(r.text.encode('ascii','ignore'))
file_add = dialog.ask(message='Enter address')
api(file_add)
| irresolute/subX | subX/subdb.py | Python | mit | 981 |
# -*- coding: utf-8 -*-
# © 2015 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, api
from openerp.exceptions import ValidationError
import requests
class WebsiteFormRecaptcha(models.AbstractModel):
""" This model provides ReCaptcha helper methods.
Nothing is stored in the DB.
"""
_name = 'website.form.recaptcha'
_description = 'Website Form Recaptcha Validations'
URL = 'https://www.google.com/recaptcha/api/siteverify'
RESPONSE_ATTR = 'g-recaptcha-response'
ERROR_MAP = {
'missing-input-secret': 'The secret parameter is missing.',
'invalid-input-secret':
'The secret parameter is invalid or malformed.',
'missing-input-response': 'The response parameter is missing.',
'invalid-input-response':
'The response parameter is invalid or malformed.',
None: 'There was a problem with the captcha entry.',
}
@api.model
def action_validate(self, response, remote_ip):
""" Validate ReCaptcha Response
Params:
response: str The value of 'g-recaptcha-response'.
remote_ip: str The end user's IP address
Raises:
ValidationError on failure
Returns:
True on success
"""
secret_key = self.env.ref(
'website_form_recaptcha.recaptcha_key_secret'
)
secret_key = secret_key.sudo().value
# @TODO: Domain validation
# domain_name = request.httprequest.environ.get(
# 'HTTP_HOST', ''
# ).split(':')[0]
data = {
'secret': secret_key,
'response': response,
'remoteip': remote_ip,
}
res = requests.post(self.URL, data=data).json()
for error in res.get('error-codes', []):
raise ValidationError(
self.ERROR_MAP.get(
error, self.ERROR_MAP[None]
)
)
if not res.get('success'):
raise ValidationError(self.ERROR_MAP[None])
return True
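# Usage sketch (added for illustration; the controller names below are
# assumptions, not part of this module): a website controller would forward
# the posted captcha answer together with the client address, e.g.
#
#   request.env['website.form.recaptcha'].action_validate(
#       post.get('g-recaptcha-response'),   # value posted by the reCaptcha widget
#       request.httprequest.remote_addr)    # end user's IP address
#
# and catch ValidationError to report the failure back to the user.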
| Tecnativa/website | website_form_recaptcha/models/website_form_recaptcha.py | Python | agpl-3.0 | 2,122 |
import unittest
import pymysql
import tap_mysql
import copy
import singer
import os
import singer.metadata
from tap_mysql.connection import connect_with_backoff
try:
import tests.utils as test_utils
except ImportError:
import utils as test_utils
import tap_mysql.sync_strategies.binlog as binlog
import tap_mysql.sync_strategies.common as common
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.event import RotateEvent
from pymysqlreplication.row_event import (
DeleteRowsEvent,
UpdateRowsEvent,
WriteRowsEvent,
)
from singer.schema import Schema
LOGGER = singer.get_logger()
SINGER_MESSAGES = []
def accumulate_singer_messages(message):
SINGER_MESSAGES.append(message)
singer.write_message = accumulate_singer_messages
class TestDateTypes(unittest.TestCase):
def setUp(self):
self.conn = test_utils.get_test_connection()
self.state = {}
log_file, log_pos = binlog.fetch_current_log_file_and_pos(self.conn)
with connect_with_backoff(self.conn) as open_conn:
with open_conn.cursor() as cursor:
cursor.execute('CREATE TABLE datetime_types (id int, datetime_col datetime, timestamp_col timestamp, time_col time, date_col date)')
cursor.execute('INSERT INTO datetime_types (id, datetime_col, timestamp_col, time_col, date_col) VALUES (1, \'0000-00-00\', \'0000-00-00 00:00:00\', \'00:00:00\', \'0000-00-00\' )')
cursor.execute('INSERT INTO datetime_types (id, datetime_col, timestamp_col, time_col, date_col) VALUES (2, NULL, NULL, NULL, NULL)')
open_conn.commit()
self.catalog = test_utils.discover_catalog(self.conn, {})
for stream in self.catalog.streams:
stream.stream = stream.table
stream.metadata = [
{'breadcrumb': (),
'metadata': {
'selected': True,
'database-name': 'tap_mysql_test',
'table-key-properties': ['id']
}},
{'breadcrumb': ('properties', 'id'), 'metadata': {'selected': True}},
{'breadcrumb': ('properties', 'datetime_col'), 'metadata': {'selected': True}},
{'breadcrumb': ('properties', 'timestamp_col'), 'metadata': {'selected': True}},
{'breadcrumb': ('properties', 'time_col'), 'metadata': {'selected': True}},
{'breadcrumb': ('properties', 'date_col'), 'metadata': {'selected': True}}
]
test_utils.set_replication_method_and_key(stream, 'LOG_BASED', None)
self.state = singer.write_bookmark(self.state,
stream.tap_stream_id,
'log_file',
log_file)
self.state = singer.write_bookmark(self.state,
stream.tap_stream_id,
'log_pos',
log_pos)
self.state = singer.write_bookmark(self.state,
stream.tap_stream_id,
'version',
singer.utils.now())
def test_initial_full_table(self):
state = {}
expected_log_file, expected_log_pos = binlog.fetch_current_log_file_and_pos(self.conn)
global SINGER_MESSAGES
SINGER_MESSAGES.clear()
tap_mysql.do_sync(self.conn, {}, self.catalog, state)
message_types = [type(m) for m in SINGER_MESSAGES]
self.assertEqual(message_types,
[singer.StateMessage,
singer.SchemaMessage,
singer.ActivateVersionMessage,
singer.RecordMessage,
singer.RecordMessage,
singer.StateMessage,
singer.ActivateVersionMessage,
singer.StateMessage])
record_messages = list(filter(lambda m: isinstance(m, singer.RecordMessage), SINGER_MESSAGES))
# Expected from 0.7.11
expected_records = [
{'datetime_col': None,
'id': 1,
'timestamp_col': None,
'time_col': '1970-01-01T00:00:00.000000Z',
'date_col': None},
{'datetime_col': None,
'id': 2,
'timestamp_col': None,
'time_col': None,
'date_col': None}
]
self.assertEqual(expected_records, [x.asdict()['record'] for x in record_messages])
| singer-io/tap-mysql | tests/nosetests/test_date_types.py | Python | agpl-3.0 | 4,760 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from singa import layer
from singa import model
class CNN(model.Model):
def __init__(self, num_classes=10, num_channels=1):
super(CNN, self).__init__()
self.num_classes = num_classes
self.input_size = 28
self.dimension = 4
self.conv1 = layer.Conv2d(num_channels, 20, 5, padding=0, activation="RELU")
self.conv2 = layer.Conv2d(20, 50, 5, padding=0, activation="RELU")
self.linear1 = layer.Linear(500)
self.linear2 = layer.Linear(num_classes)
self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
self.relu = layer.ReLU()
self.flatten = layer.Flatten()
self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
def forward(self, x):
y = self.conv1(x)
y = self.pooling1(y)
y = self.conv2(y)
y = self.pooling2(y)
y = self.flatten(y)
y = self.linear1(y)
y = self.relu(y)
y = self.linear2(y)
return y
def train_one_batch(self, x, y, dist_option, spars):
out = self.forward(x)
loss = self.softmax_cross_entropy(out, y)
if dist_option == 'plain':
self.optimizer(loss)
elif dist_option == 'half':
self.optimizer.backward_and_update_half(loss)
elif dist_option == 'partialUpdate':
self.optimizer.backward_and_partial_update(loss)
elif dist_option == 'sparseTopK':
self.optimizer.backward_and_sparse_update(loss,
topK=True,
spars=spars)
elif dist_option == 'sparseThreshold':
self.optimizer.backward_and_sparse_update(loss,
topK=False,
spars=spars)
return out, loss
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def create_model(pretrained=False, **kwargs):
"""Constructs a CNN model.
Args:
pretrained (bool): If True, returns a pre-trained model (the flag is currently ignored by this example).
Returns:
The created CNN model.
"""
model = CNN(**kwargs)
return model
__all__ = ['CNN', 'create_model']
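# Minimal usage sketch (illustrative; the optimizer import is an assumption
# about the surrounding Singa examples, not something defined in this file):
#
#   from singa import opt
#   m = create_model(num_classes=10, num_channels=1)
#   m.set_optimizer(opt.SGD(lr=0.005))
#   # out, loss = m.train_one_batch(x, y, dist_option='plain', spars=None)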
| apache/incubator-singa | examples/cnn/model/cnn.py | Python | apache-2.0 | 3,086 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.network.v2 import load_balancer
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'admin_state_up': True,
'description': '2',
'id': IDENTIFIER,
'listeners': [{'id', '4'}],
'name': '5',
'operating_status': '6',
'provisioning_status': '7',
'tenant_id': '8',
'vip_address': '9',
'vip_subnet_id': '10',
'vip_port_id': '11',
'provider': '12',
'pools': [{'id', '13'}],
}
class TestLoadBalancer(base.TestCase):
def test_basic(self):
sot = load_balancer.LoadBalancer()
self.assertEqual('loadbalancer', sot.resource_key)
self.assertEqual('loadbalancers', sot.resources_key)
self.assertEqual('/lbaas/loadbalancers', sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = load_balancer.LoadBalancer(**EXAMPLE)
self.assertTrue(sot.is_admin_state_up)
self.assertEqual(EXAMPLE['description'], sot.description)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['listeners'], sot.listener_ids)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['operating_status'], sot.operating_status)
self.assertEqual(EXAMPLE['provisioning_status'],
sot.provisioning_status)
self.assertEqual(EXAMPLE['tenant_id'], sot.project_id)
self.assertEqual(EXAMPLE['vip_address'], sot.vip_address)
self.assertEqual(EXAMPLE['vip_subnet_id'], sot.vip_subnet_id)
self.assertEqual(EXAMPLE['vip_port_id'], sot.vip_port_id)
self.assertEqual(EXAMPLE['provider'], sot.provider)
self.assertEqual(EXAMPLE['pools'], sot.pool_ids)
| ctrlaltdel/neutrinator | vendor/openstack/tests/unit/network/v2/test_load_balancer.py | Python | gpl-3.0 | 2,432 |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glib
import gtk
import pango
import sys
import random
import pickle
from base import *
from vec2 import *
from graph import *
import math
# Graph widget
###########################################################################
class GraphWidget(gtk.DrawingArea):
def __init__(self,graph):
gtk.Widget.__init__(self)
self._graph = graph
self._graph.changed.add_listener(self._on_changed)
self.connect("expose-event", self._on_expose)
self._timer_running = False
self._timer = Timer(0.033,self._on_timer_tick)
if self._graph.needs_layout:
self._timer.enabled = True
self.connect('size-allocate', self._on_resize)
self.set_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK| gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("motion_notify_event", self._on_mouse_moved)
self.connect("button-press-event", self._on_mouse_down)
self.connect("button-release-event", self._on_mouse_up)
self.connect("leave-notify-event", self._on_mouse_leave)
self._node_at_mouse_down = None
self._hovered_node = None
self._layout_enabled = True
def set_layout_enabled(self,en):
self._layout_enabled = en
if self._layout_enabled == True:
self._on_changed()
layout_enabled = property(lambda self: self._layout_enabled, set_layout_enabled)
def pick_node_at(self,x,y):
return self._pick_node(ivec2(x,y))
def _pick_node(self,v):
for n in self._graph.nodes:
np = n._position
nr = rect(n._position,n._last_drawn_size,centered=True)
if nr.contains(v):
return n
return None
def _on_mouse_down(self,w,event):
if event.button == 1:
mpos = self._from_screen(ivec2(event.x,event.y))
n = self._pick_node(mpos)
self._node_at_mouse_down = n
def _on_mouse_moved(self,w,event):
mpos = self._from_screen(ivec2(event.x,event.y))
n = self._pick_node(mpos)
old_hover = self._hovered_node
if self._hovered_node and self._hovered_node != n:
self._hovered_node.mouse_leave.fire()
self._hovered_node = n
if self._hovered_node and self._hovered_node != old_hover:
self._hovered_node.mouse_enter.fire()
def _on_mouse_up(self,w,event):
if self._node_at_mouse_down:
mpos = self._from_screen(ivec2(event.x,event.y))
ncur = self._pick_node(mpos)
if ncur == self._node_at_mouse_down:
print "Clicked"
self._node_at_mouse_down.clicked.fire()
self._node_at_mouse_down = None
def _on_mouse_leave(self,w,event):
if self._hovered_node:
self._hovered_node.mouse_leave.fire()
self._hovered_node = None
def _on_changed(self):
if self._layout_enabled == False:
return
size = self.get_allocation()
self._graph.layout()
self._update_transform()
self.queue_draw_area(0,0,size[0],size[1])
self._timer.enabled = True # keep doing layout
def _on_timer_tick(self):
# print "tick"
for i in range(0,10):
self._graph.layout()
if self._graph.needs_layout == False:
break
# redraw
self._update_transform()
self.queue_draw_area(0,0,self.allocation.width,self.allocation.height)
if self._graph.needs_layout:
return
# print "Stopping layout."
self._timer.enabled = False
def _on_resize(self,w,event):
self._update_transform()
def _update_transform(self):
bounds = self._graph.bounds
range = vec2_sub(bounds.hi, bounds.lo)
pad = vec2(range.x * 0.2, range.x * 0.1) # pad more on x because of labels
lo = vec2_sub(bounds.lo,pad) # move lo down by pad
range = vec2_add(range,pad) #increase range by 2*pad
range = vec2_add(range,pad) # hehe laziness
size = vec2(self.allocation.width,self.allocation.height)
if range.x == 0 or range.y == 0:
scale= vec2(1,1)
else:
scale = vec2_piecewise_div(size,range)
def to_screen(v):
return ivec2(vec2_piecewise_mul(vec2_sub(v,lo),scale))
def from_screen(in_s):
s = vec2(in_s)
return vec2_add(vec2_piecewise_div(s,scale),lo)
def from_screen_size(in_s):
s = vec2(in_s)
return vec2_piecewise_div(s,scale)
self._to_screen = to_screen
self._from_screen = from_screen
self._from_screen_size = from_screen_size
def _on_expose(self,a,b):
style = self.get_style()
gdk = gtk.gdk
black = gdk.color_parse("black")
yellow = gdk.color_parse("yellow")
white = gdk.color_parse("white")
g = self.window
gc = g.new_gc()
colormap = self.get_colormap()
colors = {}
def get_color(c):
if c not in colors:
colors[c] = colormap.alloc_color(c, False,False)
return colors[c]
# draw edges
gc.foreground = yellow
for e in self._graph.edges:
n1p = self._to_screen(e.node1._position)
n2p = self._to_screen(e.node2._position)
gc.line_width = e._weight
gc.foreground = get_color(e._color)
g.draw_line(gc, n1p.x, n1p.y, n2p.x, n2p.y)
# now draw nodes... :( i'm tired!
max_w = 100
layout = self.create_pango_layout("")
gc.line_width = 1
for n in self._graph.nodes:
np = self._to_screen(n._position)
layout.set_width(max_w)
layout.set_alignment(pango.ALIGN_LEFT);
layout.set_text(n._label)
layout_size = layout.get_pixel_size()
w = layout_size[0] + 4
lo_x = int(np.x - w/2)
h = layout_size[1]
lo_y = np.y - h/2
n._last_drawn_size = self._from_screen_size(vec2(w,h))
gc.foreground = get_color(n._background_color)
g.draw_rectangle(gc, True, lo_x, lo_y, w, h)
gc.foreground = get_color(n._border_color)
g.draw_rectangle(gc, False, lo_x, lo_y, w, h)
x = np.x - layout_size[0] / 2
y = np.y - layout_size[1] / 2
gc.foreground = get_color(n._text_color)
g.draw_layout(gc, x, y, layout)
###########################################################################
if __name__ == "__main__":
if len(sys.argv) > 1:
try:
f = open(sys.argv[1])
except:
print "Could not open %s" % sys.argv[1]
exit()
graph = pickle.load(f)
else:
graph = testGraph()
w = gtk.Window()
w.set_title("GraphWidget Test")
gw = GraphWidget(graph)
w.add(gw)
w.set_size_request(400,300)
w.show_all()
gtk.main()
| natduca/ndbg | util/graph_widget.py | Python | apache-2.0 | 6,897 |
"""
:codeauthor: Li Kexian <doyenli@tencent.com>
"""
import os
import pytest
from salt.config import cloud_providers_config
from tests.support.case import ShellCase
from tests.support.helpers import random_string
from tests.support.runtests import RUNTIME_VARS
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = random_string("CLOUD-TEST-", lowercase=False)
PROVIDER_NAME = "tencentcloud"
@pytest.mark.expensive_test
class TencentCloudTest(ShellCase):
"""
Integration tests for the Tencent Cloud cloud provider in Salt-Cloud
"""
def setUp(self):
"""
Sets up the test requirements
"""
super().setUp()
# check if appropriate cloud provider and profile files are present
profile_str = "tencentcloud-config"
providers = self.run_cloud("--list-providers")
if profile_str + ":" not in providers:
self.skipTest(
"Configuration file for {0} was not found. Check {0}.conf files "
"in tests/integration/files/conf/cloud.*.d/ to run these tests.".format(
PROVIDER_NAME
)
)
# check if the provider's api id and key are present in the configuration
config = cloud_providers_config(
os.path.join(
RUNTIME_VARS.FILES, "conf", "cloud.providers.d", PROVIDER_NAME + ".conf"
)
)
tid = config[profile_str][PROVIDER_NAME]["id"]
key = config[profile_str][PROVIDER_NAME]["key"]
if tid == "" or key == "":
self.skipTest(
"An api id and key must be provided to run these tests. Check "
"tests/integration/files/conf/cloud.providers.d/{}.conf".format(
PROVIDER_NAME
)
)
def test_instance(self):
"""
Test creating an instance on Tencent Cloud
"""
# check if instance with salt installed returned
try:
self.assertIn(
INSTANCE_NAME,
[
i.strip()
for i in self.run_cloud(
"-p tencentcloud-test {}".format(INSTANCE_NAME), timeout=500
)
],
)
except AssertionError:
self.run_cloud("-d {} --assume-yes".format(INSTANCE_NAME), timeout=500)
raise
# delete the instance
self.assertIn(
INSTANCE_NAME + ":",
[
i.strip()
for i in self.run_cloud(
"-d {} --assume-yes".format(INSTANCE_NAME), timeout=500
)
],
)
def tearDown(self):
"""
Clean up after tests
"""
query = self.run_cloud("--query")
ret_str = " {}:".format(INSTANCE_NAME)
# if test instance is still present, delete it
if ret_str in query:
self.run_cloud("-d {} --assume-yes".format(INSTANCE_NAME), timeout=500)
| saltstack/salt | tests/integration/cloud/clouds/test_tencentcloud.py | Python | apache-2.0 | 3,073 |
#!/usr/bin/python
# -- encoding: utf-8 --
if not "raw_input" in dir(__builtins__):
raw_input = input
def blocoToValue(b):
if not len(b): return []
r = []
for k in range(max(b.keys())+1):
if k in b.keys():
r.append(int(b[k]))
else:
r.append(3)
return r
'''
Boolean function simplification with the Karnaugh method is based on
grouping the bits whose value is True into 'neighbouring' blocks.
For that we must define what a neighbouring block means:
A block is a neighbour of another block when only one bit changes
at a time, compared with each item of the original block.
For example:
For the index (0,0) (two bits) the possible neighbours are:
(0,1) and (1,0).
For the indices (0,0,0) and (0,0,1) (three bits and already neighbours by this definition), the possible neighbours are:
([0, 1, 0], [1, 1, 0]) and ([0, 0, 1], [1, 0, 1]).
When these indices are placed on the Karnaugh map, we notice that by following the
definition of neighbours, adjacent blocks are formed. If all adjacent indices hold
the value True, that block can be simplified to only
the indices that were not modified inside the block.
With this definition, the algorithm can be built as follows:
T = input table containing a list of indices whose values are True
Ir = table containing a list of indices whose values are don't-care
F = list containing the simplified blocks
1. Take a vector I with the first index of table T not yet used
2. Build a list V with the neighbours of that vector
3. For each neighbour v of list V, check
4. Are the indices contained in v present in table T or in table Ir?
Yes: Set I = I concatenated with v, go back to 2
No: Go back to 3 and test the next neighbour
5. Is the list of neighbours exhausted?
Yes: The simplified block is now in I, F = F concatenated with I
Mark all indices of I as already used
Go back to 1.
No: Go back to 3 and test the next neighbour
6. Is the list of original indices exhausted?
Yes: The simplified function is in F, print it
Done
No: Go back to 1
Note that this algorithm is the same one we carry out by hand,
but there the grouping is done visually, whereas programmatically
it is done recursively.
According to Buchfuhrer, D.; Umans, C. (2011). "The complexity of Boolean formula minimization". Journal of Computer and System Sciences 77: 142,
minimizing Boolean circuits is NP complete. A perfect simplification
will always take time exponential in the number of bits of the problem: O(2^n).
'''
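# Worked example (added for illustration, not in the original source): with
# three variables A (bit 0), B (bit 1) and C (bit 2), the minterms 1, 3, 5
# and 7 are exactly those where bit 0 is 1, so the grouping described above
# collapses them into the single block "A":
#
#   simplificarEmTexto([1, 3, 5, 7], [], 3)   # expected to return u"A"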
import math
TRACE=False
# Build the list of neighbours of the given items
def vizinhos(item):
lista_vizinhos = []
bits_fixos = lista_bits(item)
# Para cada bit que não muda, crie uma entrada nova
for bit_fixo in bits_fixos:
bloco = []
# Para cada valor, crie um vizinho
for valor in item:
n = []
# Mude apenas um bit por vez para cada entrada individual
for bit in range(len(valor)):
if bit == bit_fixo:
n.append(1-valor[bit])
else:
n.append(valor[bit])
# Agrupe por quantidade de itens
bloco.append(n)
# Monte a lista
lista_vizinhos.append(tuple(bloco))
return lista_vizinhos
# List the bits that do not change value across the list of items
def lista_bits(itens):
lista = []
# Para cada bit
for j in range(len(itens[0])):
igual = True
for k in range(len(itens)-1):
# Verifica se o bit do item atual é igual ao do proximo item
if itens[k][j] != itens[k+1][j]:
igual = False
break
if igual:
# Adiciona na lista somente se passar pelo teste
lista.append(j)
return lista
# convert an index into a bit vector
def indiceEmVetor(valor, bits):
v = []
# Para cada bit do vetor
for i in range(bits):
# Divida o numero por 2^i e adicione o bit lsb do resultado no vetor
v.append((valor>>i) & 1)
return v
# convert a bit vector into an index
def vetorEmIndice(vetor):
v = 0
# Para cada bit do vetor, começando pelo último valor (MSB)
for i in reversed(vetor):
# Multiplique o valor atual por 2 e adicione o novo bit
v = (v << 1) + i
return v
# compute the minimum number of bits needed for the vector of indices
def numeroMinimoDeBits(valores,irrelevante,fixo=0):
# maximo indice na tabela + 1 para evitar log2(0)
# concatene 0 a lista para evitar max([])
maxval=max(valores+irrelevante+[0])+1
# calcule log2(maxval), arredondando para o inteiro acima
bits = int(math.ceil(math.log(maxval)/math.log(2)))
# no caso onde maxval é 1, numero de bits é 0.
# o mínimo necessario deve ser 1, corrija
if bits < 1 : bits = 1
if fixo:
if fixo<bits:
print( "Numero de variaveis requisitado '%d' nao eh suficiente para definir a funcao, utilizando '%d' variaveis" % (fixo,bits) )
return bits
else:
return fixo
return bits
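# Worked example (illustrative): for valores=[1,2,5] and irrelevante=[6] the
# largest index is 6, so maxval = 7 and ceil(log2(7)) = 3 bits are needed.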
# Convert the list of vectors into the block's function
def criarBloco(vetor):
funcao = dict()
# somente os bits fixos
bits = lista_bits(vetor)
# para cada bit da funcao
for i in bits:
# adicione seu valor ao dicionario
funcao[i] = vetor[0][i] == 1
return funcao
def blocoEmTexto(bloco):
f = []
char_a = ord('A')
# para cada bit do bloco
for i in sorted(bloco):
# sufixo = " "
prefixo = ""
# se o valor do bit é 0, adicione a barra
#if not bloco[i]: sufixo = u"\u0305"
# if not bloco[i]: sufixo = u"'"
if not bloco[i]: prefixo = u"/"
# adicione a variavel
# f.append(chr(char_a+i) + sufixo)
f.append(prefixo+chr(char_a+i))
if len(f) == 0 : return u"1"
return "".join(f)
def funcaoEmTexto(funcao):
ret = []
# para cada bloco da funcao
for bloco in sorted(funcao,key=blocoToValue):
# converte para texto
ret.append(blocoEmTexto(bloco))
if ret == []:
return u"0"
return u" + ".join(ret)
def calcularPeso(testados,irrelevante,vetores):
peso = 0
# para cada entrada nos vetores
for v in vetores:
k = vetorEmIndice(v)
# incremente o peso para cada entrada ainda nao utilizada, somente se não está no irrelevante
if not (k in testados.keys()) and not (k in irrelevante):
peso = peso+1
return peso
def agrupar(testados,tabela,irrelevante,vetores,identacao=">"):
otimo = False
if TRACE: print( identacao+"Vetores originais", vetores )
# pegar os vizinhos da lista de indices
blocos = vizinhos(vetores)
# inicialize o maior bloco com os vetores originais
maior_bloco = vetores
# initialize the weight of the found block with 0
# the weight is the count of items not yet used
# The higher the weight, the closer we are to the best possible optimization
peso = 0
# para cada bloco de vizinhos
for bloco in blocos:
if TRACE: print( identacao+"Testando vizinho", bloco )
encontrado = True
# para cada vizinho
for v in bloco:
# verifique se o indice existe na tabela
indice = vetorEmIndice(v)
if not (indice in tabela or indice in irrelevante):
if TRACE: print( identacao+" Falhou" )
# nao existe o indice, vizinho inválido, tente o próximo
encontrado = False
break;
# se todos os items foram 1
if encontrado:
# tente agrupar mais um nível
if TRACE: print( identacao+" Funcionou, testar um nivel acima" )
bloco_encontrado,otimo = agrupar(testados,tabela,irrelevante,vetores+list(bloco),identacao+">")
# Calcular peso do bloco_encontrado
novo_peso = calcularPeso(testados,irrelevante,bloco_encontrado)
# se o novo bloco é maior que o original ou se o peso do novo bloco é maior substitua
if (len(bloco_encontrado) > len(maior_bloco) or (len(bloco_encontrado) == len(maior_bloco) and novo_peso > peso)):
if TRACE and peso > 0: print( (identacao+" Melhor bloco encontrado com peso %d, (anterior era %d), substituindo") % (novo_peso,peso))
maior_bloco = bloco_encontrado
peso = novo_peso
# otimização:
# foi buscado no ultimo nivel, nenhum vizinho tera mais que metade dos bits agrupados
# nao testar proximos vizinhos
if (otimo):
break;
# otimização:
# todos os bits foram agrupados, nao teste outros vizinhos
if (len(maior_bloco) == (1 << len(vetores[0]))):
break;
else:
# otimizacao:
if (len(vetores) == (1 << (len(vetores[0])-1))):
otimo = True
if TRACE: print( identacao+"Maior bloco encontrado" , maior_bloco, ", peso %d" % calcularPeso(testados,irrelevante,maior_bloco) )
return (maior_bloco,otimo)
# tabela contains the indices whose value is 1
# e.g.: 1,2,5,7
# irrelevante contains the don't-care indices
# e.g.: 6
# 0,1,1,0,0,1,X,1
def simplificar(tabela,irrelevante,bits=0):
# calcule o maximo numero de variaveis
numbits = numeroMinimoDeBits(tabela,irrelevante,bits)
testados = dict()
funcao = []
# para cada entrada na tabela
for i in tabela:
if not i in testados.keys():
# agrupe os valores 1, se a entrada ainda não foi utilizada antes
if TRACE: print( "Testando entrada da tabela" )
g,_ = agrupar(testados,tabela,irrelevante,[ indiceEmVetor(i,numbits) ])
# minimize e adicione o bloco na lista de funcoes
funcao.append(criarBloco(g))
# para cada entrada do grupo
for v in g:
k = vetorEmIndice(v)
# adicione esse indice à lista de entradas já utilizadas
testados[k]=True
return funcao
def simplificarEmTexto(tabela,irrelevante,bits=0):
return funcaoEmTexto(simplificar(tabela,irrelevante,bits))
def avaliar(funcao, indice,bits):
numbits=numeroMinimoDeBits([indice],[],bits)
v = indiceEmVetor(indice,numbits)
valor = 0
for bloco in funcao:
valor_bloco = 1
for i in reversed(list(bloco.keys())):
if i>=numbits:
valor_bloco = valor_bloco and (0 == bloco[i])
else:
valor_bloco = valor_bloco and (v[i] == bloco[i])
if not valor_bloco:
break
valor = valor or valor_bloco
if valor:
break
return valor
def validar(tabela,irrelevante,bits=0):
numbits = numeroMinimoDeBits(tabela,irrelevante,bits)
f = simplificar(tabela,irrelevante,numbits)
for i in range(1<<numbits):
if not (i in irrelevante):
v = avaliar(f,i,numbits)
if ((i in tabela and v != 1) or ( not (i in tabela) and v == 1 ) ):
print( "Falhou na validação: indice",i )
return []
return f
def reverseBits(v,bits):
r = 0;
for i in range(bits):
r = r | (((v>>i)&1)<<(bits-i-1))
return r
def funcaoEmIndices(s,bits):
import re
s = s.upper()
# Se contem caracteres invalidos, aborte
if re.search("[^A-Z +/.]",s):
raise Exception("Funcao contem caracteres invalidos")
tabela = []
bits = max(ord(max(s))-ord('A')+1,bits)
# crie lista com variaveis esperadas baseado na funcao ou no numero de bits requisitado
variaveis = [chr(c+ord('A')) for c in range(bits)]
# para cada bloco da funcao
for b in s.split("+"):
r = 0;
# para cada letra+negacao do bloco
bloco = list(map(lambda x: str.strip(x," ."),re.findall("(?: |[/.])?(?:[A-Z])",b.replace("//",""))))
utilizar_bloco = True
for variavel in bloco:
# Se a variavel contem a negacao, seu tamanho é 2
if len(variavel) == 2:
# ignore, já que esse bit é 0 para a variavel
continue
# calcule o indice (A=0, B=1, etc...)
idx = ord(variavel[0]) - ord('A')
# caso variavel e variavel' seja definido, esse bloco é sempre zero
if "/"+variavel in bloco:
utilizar_bloco = False
break
# ligue o bit correspondente ao bit encontrado
r = r|(1<<idx)
# caso variavel e variavel' seja definido, esse bloco é sempre zero
if not utilizar_bloco:
continue
x = [r]
# para cada variavel esperada
for variavel in variaveis:
# se a variavel nao explicitamente mencionada, adicione
# o indice onde a variavel é 1 também
# faca o mesmo para cada novo indice adicionado (recursao implementada com loop)
if not variavel in b:
i = len(x)
for k in range(i):
idx = ord(variavel[0]) - ord('A')
x.append(x[k] | (1<<idx))
# Add the indices to the table, without duplicates
tabela=list(set(tabela+x))
return (tabela,bits)
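# Example (sketch): parse a function written in the textual format used above,
# where '/' (or '.') negates the variable that follows it:
#   funcaoEmIndices("/A/B+AB", 0)  ->  ([0, 3], 2)   (index order may vary)
# i.e. the XNOR of A and B is true for index 0 (A=0,B=0) and index 3 (A=1,B=1).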
if __name__ == "__main__":
import sys
bits=0
tabela=[]
irrelevante=[]
usage = '''Syntax: %s [table [dont_care [bits]] | --help ]
'table' lists the indices where the function result must be true.
'dont_care' lists the indices where the function result is irrelevant (don't care).
'bits' sets the number of bits of the function. If it is not given, it will be
derived from 'table' and 'dont_care'.
'-h' or '--help' shows this help.
The 'table' and 'dont_care' arguments can be either a list of indices or a
function. Examples: '0,2,3', '/A/B+/AB+AB'.
If no argument is given, the truth table will be requested interactively.''' % sys.argv[0]
if len(sys.argv)==2 and (sys.argv[1]=='-h' or sys.argv[1]=='--help'):
print( usage )
else:
if (len(sys.argv) > 1):
if len(sys.argv)>3: bits=int(sys.argv[3])
while(True):
try:
tabela = sys.argv[1].split(",")
if len(tabela) == 1 and tabela[0].strip() == '':
tabela = []
else:
tabela=[int(x,0) for x in tabela]
except:
tabela, bits = funcaoEmIndices(sys.argv[1], bits)
if (len(sys.argv)>2):
try:
ibits = bits
irrelevante = sys.argv[2].split(",")
if len(irrelevante) == 1 and irrelevante[0].strip() == '':
irrelevante = []
else:
irrelevante =[int(x,0) for x in irrelevante]
except:
irrelevante, ibits=funcaoEmIndices(sys.argv[2],bits)
if (bits == ibits):
break
else:
bits = max(ibits,bits)
else:
break
else:
reverse = True
if len(sys.argv)>1 and sys.argv[1] == "A=lsb":
reverse = False
bits = int(raw_input("Numero de variaveis: "))
if (bits<1):
print( "Entrada invalida" )
exit(1)
for j in range(1<<bits):
i = j
if reverse:
i=reverseBits(j,bits)
ok = False
while(not ok):
ok=True
n=raw_input("Entre o valor para "+blocoEmTexto(criarBloco([indiceEmVetor(i,bits)]))+": ")
if n == 'x' or n == 'X' or n == '.':
irrelevante.append(i)
elif n == '1':
tabela.append(i)
elif n == '0':
pass
else:
print( "Entrada invalida")
ok=False
bits = numeroMinimoDeBits(tabela,irrelevante,bits)
print( "%s \"%s\" \"%s\" %d" % (sys.argv[0],",".join(map(str,tabela)), ",".join(map(str,irrelevante)), bits ))
print( funcaoEmTexto(validar(sorted(tabela),irrelevante,bits)))
|
ReallyNiceGuy/karnaugh
|
karnaugh.py
|
Python
|
mit
| 15,164
|
# display_morphology.py ---
#
# Filename: display_morphology.py
# Description:
# Author:
# Maintainer:
# Created: Fri Mar 8 11:26:13 2013 (+0530)
# Version:
# Last-Updated: Thu Jul 18 15:12:01 2013 (+0530)
# By: subha
# Update #: 366
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Draw the schematic diagram of cells using networkx
#
#
# Change log:
#
#
#
#
# Code:
"""
Display/save the topology of one or all cells in traub_2005 demo.
command line options (all are optional):
-c celltype : display topology of cell type 'celltype'. If unspecified, all cell types are displayed
-p filename : save output to the file specified by 'filename'
-l : show labels of the compartments
-h,--help : show this help
"""
import sys
sys.path.append('../../../python')
import os
import numpy as np
import matplotlib.pyplot as plt
import pygraphviz
import networkx as nx
import moose
import cells
def node_sizes(g):
"""Calculate the 2D projection area of each compartment.
g: graph whose nodes are moose Compartment objects.
return a numpy array with compartment areas in 2D projection
normalized by the maximum.
"""
sizes = []
comps = [moose.Compartment(n) for n in g.nodes()]
sizes = np.array([c.length * c.diameter for c in comps])
soma_i = [ii for ii in range(len(comps)) if comps[ii].path.endswith('comp_1')]
sizes[soma_i] *= np.pi/4 # for soma, length=diameter. So area is diameter^2 * pi / 4
return sizes / max(sizes)
def cell_to_graph(cell, label=False):
"""Convert a MOOSE compartmental neuron into a graph describing
the topology of the compartments
"""
soma = moose.element('%s/comp_1' % (cell.path))
if len(soma.neighbors['axialOut']) > 0:
msg = 'raxialOut'
elif len(soma.neighbors['distalOut']) > 0:
msg = 'distalOut'
else:
raise Exception('No neighbors on raxial or distal')
es = [(c1.path, c2[0].path, {'weight': 2/ (moose.Compartment(c1).Ra + moose.Compartment(c2).Ra)}) \
for c1 in moose.wildcardFind('%s/##[ISA=Compartment]' % (cell.path)) \
for c2 in moose.Compartment(c1).neighbors[msg]]
g = nx.Graph()
g.add_edges_from(es)
if label:
for v in g.nodes():
g.node[v]['label'] = v.rpartition('/')[-1]
return g
def axon_dendrites(g):
"""Get a 2-tuple with list of nodes representing axon and list of
nodes representing dendrites.
g: graph whose nodes are compartments
"""
axon = []
soma_dendrites = []
for n in g.nodes():
if moose.exists('%s/CaPool' % (n)):
soma_dendrites.append(n)
else:
axon.append(n)
return (axon, soma_dendrites)
def plot_cell_topology(cell, label=False):
g = cell_to_graph(cell, label=label)
axon, sd = axon_dendrites(g)
node_size = node_sizes(g)
weights = np.array([g.edge[e[0]][e[1]]['weight'] for e in g.edges()])
try:
pos = nx.graphviz_layout(g,prog='twopi',root=cell.path + '/comp_1')
except NameError:
# this is the best networkx can do by itself. Its Fruchterman-
# Reingold layout ends up with overlapping edges even for a
# tree. igraph does much better.
pos = nx.spectral_layout(g)
nx.draw_networkx_edges(g, pos, width=10*weights/max(weights), edge_color='gray', alpha=0.8)
nx.draw_networkx_nodes(g, pos, with_labels=False,
node_size=node_size * 500,
node_color=map(lambda x: 'k' if x in axon else 'gray', g.nodes()),
linewidths=[1 if n.endswith('comp_1') else 0 for n in g.nodes()],
alpha=0.8)
if label:
labels = dict([(n, g.node[n]['label']) for n in g.nodes()])
nx.draw_networkx_labels(g, pos, labels=labels)
plt.title(cell.__class__.__name__)
from matplotlib.backends.backend_pdf import PdfPages
import sys
from getopt import getopt
if __name__ == '__main__':
print sys.argv
optlist, args = getopt(sys.argv[1:], 'lhp:c:', ['help'])
celltype = ''
pdf = ''
label = False
for arg in optlist:
if arg[0] == '-c':
celltype = arg[1]
elif arg[0] == '-p':
pdf = arg[1]
elif arg[0] == '-l':
label = True
elif arg[0] == '-h' or arg[0] == '--help':
print 'Usage: %s [-c CellType [-p filename]]' % (sys.argv[0])
print 'Display/save the morphology of cell type "CellType".'
print 'Options:'
print '-c celltype (optional) display only an instance of the specified cell type. If CellType is empty or not specified, all prototype cells are displayed.'
print '-l label the compartments'
print '-p filename (optional) save output in a pdf file named "filename".'
print '-h,--help print this help'
sys.exit(0)
print 'args', optlist, args
figures = []
if len(celltype) > 0:
try:
fig = plt.figure()
figures.append(fig)
cell = cells.init_prototypes()[celltype]
# print 'Label', label
plot_cell_topology(cell, label=label)
except KeyError:
print '%s: no such cell type. Available are:' % (celltype)
for ii in cells.init_prototypes().keys():
print ii,
print
sys.exit(1)
else:
for cell, proto in cells.init_prototypes().items():
figures.append(plt.figure())
plot_cell_topology(proto, label=label)
plt.axis('off')
if len(pdf) > 0:
pdfout = PdfPages(pdf)
for fig in figures:
pdfout.savefig(fig)
pdfout.close()
else:
plt.show()
#
# display_morphology.py ends here
|
dilawar/moose-full
|
moose-examples/traub_2005/py/display_morphology.py
|
Python
|
gpl-2.0
| 5,844
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from flask.ext.login import UserMixin
from flask.ext.principal import Identity
import hashlib
import os
import yaml
import uuid
from octoprint.settings import settings
class UserManager(object):
valid_roles = ["user", "admin"]
@staticmethod
def createPasswordHash(password, salt=None):
if not salt:
salt = settings().get(["accessControl", "salt"])
if salt is None:
import string
from random import choice
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
salt = "".join(choice(chars) for _ in xrange(32))
settings().set(["accessControl", "salt"], salt)
settings().save()
return hashlib.sha512(password + salt).hexdigest()
def checkPassword(self, username, password):
user = self.findUser(username)
if not user:
return False
hash = UserManager.createPasswordHash(password)
if user.check_password(hash):
# new hash matches, correct password
return True
else:
# new hash doesn't match, but maybe the old one does, so check that!
oldHash = UserManager.createPasswordHash(password, salt="mvBUTvwzBzD3yPwvnJ4E4tXNf3CGJvvW")
if user.check_password(oldHash):
# old hash matches, we migrate the stored password hash to the new one and return True since it's the correct password
self.changeUserPassword(username, password)
return True
else:
# old hash doesn't match either, wrong password
return False
def addUser(self, username, password, active, roles):
pass
def changeUserActivation(self, username, active):
pass
def changeUserRoles(self, username, roles):
pass
def addRolesToUser(self, username, roles):
pass
def removeRolesFromUser(self, username, roles):
pass
def changeUserPassword(self, username, password):
pass
def removeUser(self, username):
pass
def findUser(self, username=None):
return None
def getAllUsers(self):
return []
def hasBeenCustomized(self):
return False
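# Usage sketch (hypothetical credentials; relies on a concrete subclass such as
# the FilebasedUserManager below):
#   manager = FilebasedUserManager()
#   manager.addUser("alice", "s3cret", active=True, roles=["user"])
#   manager.checkPassword("alice", "s3cret")  # -> True, migrating legacy hashes if needed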
##~~ FilebasedUserManager, takes available users from users.yaml file
class FilebasedUserManager(UserManager):
def __init__(self):
UserManager.__init__(self)
userfile = settings().get(["accessControl", "userfile"])
if userfile is None:
userfile = os.path.join(settings().settings_dir, "users.yaml")
self._userfile = userfile
self._users = {}
self._dirty = False
self._customized = None
self._load()
def _load(self):
if os.path.exists(self._userfile) and os.path.isfile(self._userfile):
self._customized = True
with open(self._userfile, "r") as f:
data = yaml.safe_load(f)
for name in data.keys():
attributes = data[name]
apikey = None
if "apikey" in attributes:
apikey = attributes["apikey"]
self._users[name] = User(name, attributes["password"], attributes["active"], attributes["roles"], apikey)
else:
self._customized = False
def _save(self, force=False):
if not self._dirty and not force:
return
data = {}
for name in self._users.keys():
user = self._users[name]
data[name] = {
"password": user._passwordHash,
"active": user._active,
"roles": user._roles,
"apikey": user._apikey
}
with open(self._userfile, "wb") as f:
yaml.safe_dump(data, f, default_flow_style=False, indent=" ", allow_unicode=True)
self._dirty = False
self._load()
def addUser(self, username, password, active=False, roles=None, apikey=None):
if not roles:
roles = ["user"]
if username in self._users.keys():
raise UserAlreadyExists(username)
self._users[username] = User(username, UserManager.createPasswordHash(password), active, roles, apikey)
self._dirty = True
self._save()
def changeUserActivation(self, username, active):
if not username in self._users.keys():
raise UnknownUser(username)
if self._users[username]._active != active:
self._users[username]._active = active
self._dirty = True
self._save()
def changeUserRoles(self, username, roles):
if not username in self._users.keys():
raise UnknownUser(username)
user = self._users[username]
removedRoles = set(user._roles) - set(roles)
self.removeRolesFromUser(username, removedRoles)
addedRoles = set(roles) - set(user._roles)
self.addRolesToUser(username, addedRoles)
def addRolesToUser(self, username, roles):
if not username in self._users.keys():
raise UnknownUser(username)
user = self._users[username]
for role in roles:
if not role in user._roles:
user._roles.append(role)
self._dirty = True
self._save()
def removeRolesFromUser(self, username, roles):
if not username in self._users.keys():
raise UnknownUser(username)
user = self._users[username]
for role in roles:
if role in user._roles:
user._roles.remove(role)
self._dirty = True
self._save()
def changeUserPassword(self, username, password):
if not username in self._users.keys():
raise UnknownUser(username)
passwordHash = UserManager.createPasswordHash(password)
user = self._users[username]
if user._passwordHash != passwordHash:
user._passwordHash = passwordHash
self._dirty = True
self._save()
def generateApiKey(self, username):
if not username in self._users.keys():
raise UnknownUser(username)
user = self._users[username]
user._apikey = ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)
self._dirty = True
self._save()
return user._apikey
def deleteApikey(self, username):
if not username in self._users.keys():
raise UnknownUser(username)
user = self._users[username]
user._apikey = None
self._dirty = True
self._save()
def removeUser(self, username):
if not username in self._users.keys():
raise UnknownUser(username)
del self._users[username]
self._dirty = True
self._save()
def findUser(self, username=None, apikey=None):
if username is not None:
if username not in self._users.keys():
return None
return self._users[username]
elif apikey is not None:
for user in self._users.values():
if apikey == user._apikey:
return user
return None
else:
return None
def getAllUsers(self):
return map(lambda x: x.asDict(), self._users.values())
def hasBeenCustomized(self):
return self._customized
##~~ Exceptions
class UserAlreadyExists(Exception):
def __init__(self, username):
Exception.__init__(self, "User %s already exists" % username)
class UnknownUser(Exception):
def __init__(self, username):
Exception.__init__(self, "Unknown user: %s" % username)
class UnknownRole(Exception):
def __init__(self, role):
Exception.__init__(self, "Unknown role: %s" % role)
##~~ User object
class User(UserMixin):
def __init__(self, username, passwordHash, active, roles, apikey=None):
self._username = username
self._passwordHash = passwordHash
self._active = active
self._roles = roles
self._apikey = apikey
def asDict(self):
return {
"name": self._username,
"active": self.is_active(),
"admin": self.is_admin(),
"user": self.is_user(),
"apikey": self._apikey
}
def check_password(self, passwordHash):
return self._passwordHash == passwordHash
def get_id(self):
return self._username
def get_name(self):
return self._username
def is_active(self):
return self._active
def is_user(self):
return "user" in self._roles
def is_admin(self):
return "admin" in self._roles
##~~ DummyUser object to use when accessControl is disabled
class DummyUser(User):
def __init__(self):
User.__init__(self, "dummy", "", True, UserManager.valid_roles)
def check_password(self, passwordHash):
return True
class DummyIdentity(Identity):
def __init__(self):
Identity.__init__(self, "dummy")
def dummy_identity_loader():
return DummyIdentity()
##~~ Apiuser object to use when api key is used to access the API
class ApiUser(User):
def __init__(self):
User.__init__(self, "api", "", True, UserManager.valid_roles)
|
C-o-r-E/OctoPrint
|
src/octoprint/users.py
|
Python
|
agpl-3.0
| 8,147
|
#
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script automates the deployment of the ws2kafka application
(creates required service instances, pushes to TAP instance).
"""
from app_deployment_helpers import cf_cli
from app_deployment_helpers import cf_helpers
APP_NAME = "ws2kafka"
PARSER = cf_helpers.get_parser(APP_NAME)
ARGS = PARSER.parse_args()
CF_INFO = cf_helpers.get_info(ARGS)
cf_cli.login(CF_INFO)
cf_cli.create_service('kafka', 'shared', 'kafka-inst')
PROJECT_DIR = ARGS.project_dir if ARGS.project_dir else \
cf_helpers.get_project_dir()
cf_helpers.push(work_dir=PROJECT_DIR, options=ARGS.app_name)
|
trustedanalytics/ingestion-ws-kafka-hdfs
|
ws2kafka/deploy/deploy.py
|
Python
|
apache-2.0
| 1,167
|
import _plotly_utils.basevalidators
class TexttemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="texttemplatesrc", parent_name="scattersmith", **kwargs
):
super(TexttemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scattersmith/_texttemplatesrc.py
|
Python
|
mit
| 436
|
#
# File: keytool.py
# Based on the work of:
# Copyright (c) 2017, paul@discotd5.com
# Modified by:
# Copyright (c) 2017, xabiergarmendia@gmail.com
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
#!/usr/bin/python
from sys import argv
def printhelp():
print "\nLand Rover Td5 Storm Engine ECU auth keygen"
print "Usage:"
print "\tGet key with seed (0-65535), for example:\n"
print "\t\tkeytool.py 1082"
print "\tor"
print "\t\tkeytool.py 0x043A\n"
print "\twill return key"
exit()
if len(argv)==2:
#
if argv[1].startswith("0x"):
seedin = int(argv[1],16)
else:
seedin = int(argv[1])
if 0<=seedin<=0xFFFF:
seed=seedin
count = ((seed >> 0xC & 0x8) + (seed >> 0x5 & 0x4) + (seed >> 0x3 & 0x2) + (seed & 0x1)) + 1
for idx in range(0, count):
tap = ((seed >> 1) + (seed >> 2 ) + (seed >> 8 ) + (seed >> 9)) & 1
tmp = (seed >> 1) | ( tap << 0xF)
if (seed >> 0x3 & 1) and (seed >> 0xD & 1):
seed = tmp & ~1
else:
seed = tmp | 1
#Calculate high and low bytes, for auth response
high = seed >> 8
low = seed & 255
print "Seed: ",seedin," (",hex(seedin),") - Key: ",seed," (",hex(seed),") "
else:
printhelp()
else:
printhelp()
|
pajacobson/td5keygen
|
keytool.py
|
Python
|
bsd-2-clause
| 2,904
|
#!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing data providers for UserProperty.
"""
from soc.modules.seeder.logic.providers.provider import BaseDataProvider
from soc.modules.seeder.logic.providers.string import RandomNameProvider
from soc.modules.seeder.logic.providers.provider import FixedValueProvider
from soc.modules.seeder.logic.providers.provider import ParameterValueError
from django.core.validators import email_re
__authors__ = [
'"Felix Kerekes" <sttwister@gmail.com>',
]
# pylint: disable=W0223
class EmailProvider(BaseDataProvider):
"""Base class for all data providers that return an e-mail.
"""
pass
class FixedEmailProvider(EmailProvider, FixedValueProvider):
"""Data provider that returns a fixed e-mail.
"""
def checkParameters(self):
super(FixedEmailProvider, self).checkParameters()
value = self.param_values.get('value', None)
try:
if not email_re.match(value):
raise ValueError
except (TypeError, ValueError):
raise ParameterValueError('%s is not a valid e-mail address' % value)
class RandomEmailProvider(EmailProvider, RandomNameProvider):
"""Data provider that returns a random e-mail.
"""
@staticmethod
def getRandomDomain():
"""Returns a random domain for a link
"""
#TODO(sttwister): Really return a random domain
return "gmail.com"
def getValue(self):
name = RandomNameProvider.getValue(self)
return '.'.join(name.split()).lower() + '@' + self.getRandomDomain()
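# Usage sketch (the generated address is illustrative; the local part comes
# from RandomNameProvider and the domain is currently fixed):
#   provider = RandomEmailProvider()
#   provider.getValue()  # -> e.g. "john.smith@gmail.com"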
|
SRabbelier/Melange
|
app/soc/modules/seeder/logic/providers/email.py
|
Python
|
apache-2.0
| 2,069
|
COMMTRACK_REPORT_XMLNS = 'http://commcarehq.org/ledger/v1'
SECTION_TYPE_STOCK = 'stock'
REPORT_TYPE_BALANCE = 'balance'
REPORT_TYPE_TRANSFER = 'transfer'
VALID_REPORT_TYPES = (REPORT_TYPE_BALANCE, REPORT_TYPE_TRANSFER)
TRANSACTION_TYPE_STOCKONHAND = 'stockonhand'
TRANSACTION_TYPE_STOCKOUT = 'stockout'
TRANSACTION_TYPE_RECEIPTS = 'receipts'
TRANSACTION_TYPE_CONSUMPTION = 'consumption'
TRANSACTION_TYPE_LA = 'lossoradjustment'
TRANSACTION_SUBTYPE_INFERRED = '_inferred'
|
puttarajubr/commcare-hq
|
corehq/ex-submodules/casexml/apps/stock/const.py
|
Python
|
bsd-3-clause
| 475
|
from django.contrib import admin
from django import forms
from django.conf import settings
class DraftAdmin(admin.ModelAdmin):
"""
Add draft.js file
"""
def _media(self):
from django.conf import settings
js = ['js/core.js', 'js/admin/RelatedObjectLookups.js',
'js/jquery.min.js', 'js/jquery.init.js']
if self.actions is not None:
js.extend(['js/actions.min.js'])
if self.prepopulated_fields:
js.append('js/urlify.js')
js.append('js/prepopulate.min.js')
if self.opts.get_ordered_objects():
js.extend(['js/getElementsBySelector.js', 'js/dom-drag.js' , 'js/admin/ordering.js'])
js = ['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js]
js.append('%sdraft/js/draft.js' % (settings.STATIC_URL,))
return forms.Media(js=js)
media = property(_media)
|
platypus-creation/django-draft
|
draft/admin.py
|
Python
|
mit
| 905
|
from .models import get_unread_inbox_counter
def group_messaging_context(request):
if request.user.is_authenticated():
count_record = get_unread_inbox_counter(request.user)
return {'group_messaging_unread_inbox_count': count_record.count}
return {}
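# Usage sketch: expose the counter to templates by registering this context
# processor in the Django settings (the dotted path depends on how the app is
# installed on the Python path):
#   TEMPLATE_CONTEXT_PROCESSORS += (
#       'group_messaging.context.group_messaging_context',
#   )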
|
divio/askbot-devel
|
askbot/deps/group_messaging/context.py
|
Python
|
gpl-3.0
| 274
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
MultipleExternalInputDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
(C) 2013 by CS Systemes d'information (CS SI)
Email : volayaf at gmail dot com
otb at c-s dot fr (CS SI)
Contributors : Victor Olaya - basis from MultipleInputDialog
Alexia Mondot (CS SI) - new parameter
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import QgsSettings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import QByteArray
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox, QFileDialog
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))
class MultipleFileInputDialog(BASE, WIDGET):
def __init__(self, options):
super(MultipleFileInputDialog, self).__init__(None)
self.setupUi(self)
self.lstLayers.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.selectedoptions = options
# Additional buttons
self.btnAdd = QPushButton(self.tr('Add file'))
self.buttonBox.addButton(self.btnAdd,
QDialogButtonBox.ActionRole)
self.btnRemove = QPushButton(self.tr('Remove file(s)'))
self.buttonBox.addButton(self.btnRemove,
QDialogButtonBox.ActionRole)
self.btnRemoveAll = QPushButton(self.tr('Remove all'))
self.buttonBox.addButton(self.btnRemoveAll,
QDialogButtonBox.ActionRole)
self.btnAdd.clicked.connect(self.addFile)
self.btnRemove.clicked.connect(lambda: self.removeRows())
self.btnRemoveAll.clicked.connect(lambda: self.removeRows(True))
self.settings = QgsSettings()
self.restoreGeometry(self.settings.value("/Processing/multipleFileInputDialogGeometry", QByteArray()))
self.populateList()
self.finished.connect(self.saveWindowGeometry)
def saveWindowGeometry(self):
self.settings.setValue("/Processing/multipleInputDialogGeometry", self.saveGeometry())
def populateList(self):
model = QStandardItemModel()
for option in self.selectedoptions:
item = QStandardItem(option)
model.appendRow(item)
self.lstLayers.setModel(model)
def accept(self):
self.selectedoptions = []
model = self.lstLayers.model()
for i in range(model.rowCount()):
item = model.item(i)
self.selectedoptions.append(item.text())
QDialog.accept(self)
def reject(self):
QDialog.reject(self)
def addFile(self):
settings = QgsSettings()
if settings.contains('/Processing/LastInputPath'):
path = settings.value('/Processing/LastInputPath')
else:
path = ''
files, selected_filter = QFileDialog.getOpenFileNames(self,
self.tr('Select File(s)'), path, self.tr('All files (*.*)'))
if len(files) == 0:
return
model = self.lstLayers.model()
for filePath in files:
item = QStandardItem(filePath)
model.appendRow(item)
settings.setValue('/Processing/LastInputPath',
os.path.dirname(files[0]))
def removeRows(self, removeAll=False):
if removeAll:
self.lstLayers.model().clear()
else:
self.lstLayers.setUpdatesEnabled(False)
indexes = sorted(self.lstLayers.selectionModel().selectedIndexes())
for i in reversed(indexes):
self.lstLayers.model().removeRow(i.row())
self.lstLayers.setUpdatesEnabled(True)
|
CS-SI/QGIS
|
python/plugins/processing/gui/MultipleFileInputDialog.py
|
Python
|
gpl-2.0
| 4,837
|
class Tree:
class Node:
def __init__(self, data):
self.parent = None
self.left = None
self.right = None
self.key = data
def __init__(self):
self.root = None
def find(self, data):
p = self.root
while p is not None and p.key != data:
if data > p.key:
p = p.right
else:
p = p.left
return p
def insert(self, data):
p = self.find(data)
if p is not None:
return
node = Tree.Node(data)
if self.root is None:
self.root = node
return
p = self.root
while True:
if data < p.key:
if p.left is None:
p.left = node
node.parent = p
break
else:
p = p.left
else:
if p.right is None:
p.right = node
node.parent = p
break
else:
p = p.right
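# A minimal usage sketch (not part of the original lecture code): build a BST
# and look up keys.
if __name__ == "__main__":
    t = Tree()
    for key in (8, 3, 10, 1, 6):
        t.insert(key)
    print(t.find(6).key)  # -> 6
    print(t.find(7))      # -> None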
|
mipt-cs-on-python3/lections
|
lection_23/class Tree.py
|
Python
|
gpl-3.0
| 1,112
|
# -*- coding: utf-8 -*-
################################################################
## Game.py
################################################################
# Game class: requires Player.py
#
# Antoine Courcelles, Marc Cerou,
# André Guimaraes-Duarte, Doina Leca
from Player import*
class Game:
def __init__(self, nbPlayer, listPlayer, listName):
self.listPlayer=[]
self.nbPlayer=nbPlayer
# create the food bonuses
self.nbFood=1
if nbPlayer>1:
self.nbFood=nbPlayer-1
self.listFood=[]
for i in range(self.nbFood):
new=[]
new.append(random.randint(0, window_width))
new.append(random.randint(0, window_height))
self.listFood.append(new)
# create the players
for i in range(nbPlayer):
self.listPlayer.append(Player(listName[i], listPlayer[i], list_color[i], list_init[i]))
# Handles game over: if a player eats another one, return the offending Player object
def gameOver_player(self):
for player in self.listPlayer:
head=player.coordinates[0]
for player2 in self.listPlayer:
if player!=player2:
for case in player2.coordinates:
if head==case:
return player
return 0
# Display GAME OVER and the player concerned
def gameOver_stop(self, name, score):
print "GAME OVER Joueur : "+str(name)+" Score : "+str(score)
# Update the player's score if it has eaten a bonus
def upScore(self):
for player in self.listPlayer:
head=player.coordinates[0]
for food in self.listFood:
# handle the approximation head != food
if ((food[0]>head[0]-approxEat and food[0]<head[0]+approxEat)
and (food[1]>head[1]-approxEat and food[1]<head[1]+approxEat)):
player.majScore()
self.listFood.pop(self.listFood.index(food))
new=[]
new.append(random.randint(0, window_width))
new.append(random.randint(0, window_height))
self.listFood.append(new)
break
# Game step: update movement + score
# remove the player if it triggers a game over
def play(self):
for player in self.listPlayer:
game_over=0
player.movement()
player.stayHere()
self.upScore()
if player.gameOver_lonely():
self.gameOver_stop(player.name, player.score)
self.listPlayer.pop(self.listPlayer.index(player))
del(player)
game_over=1
player_GO=self.gameOver_player()
if player_GO:
self.gameOver_stop(player_GO.name, player_GO.score)
self.listPlayer.pop(self.listPlayer.index(player_GO))
del(player_GO)
game_over=1
if game_over and len(self.listPlayer)==0:
return 0
return 1
# Format the snake coordinates for the network
def prepareListSnake(self):
listSnake=''
for player in self.listPlayer:
listSnake+=str(player.color)+'/'
for case in player.coordinates:
listSnake+=str(case[0])+':'+str(case[1])+','
listSnake=listSnake[:-1]+';'
listSnake=listSnake[:-1]
return listSnake
# Format the bonus coordinates for the network
def prepareListFood(self):
listFood=''
for case in self.listFood:
listFood+=str(case[0])+','+str(case[1])+';'
listFood=listFood[:-1]
return listFood
|
x-alp/Oksamultisnake
|
Game.py
|
Python
|
gpl-3.0
| 3,107
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetIdentity(Model):
"""Identity for the virtual machine scale set.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar principal_id: The principal id of virtual machine scale set
identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id associated with the virtual machine scale
set.
:vartype tenant_id: str
:param type: The type of identity used for the virtual machine scale set.
Currently, the only supported type is 'SystemAssigned', which implicitly
creates an identity. Possible values include: 'SystemAssigned'
:type type: str or
~azure.mgmt.compute.v2016_03_30.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'ResourceIdentityType'},
}
def __init__(self, type=None):
super(VirtualMachineScaleSetIdentity, self).__init__()
self.principal_id = None
self.tenant_id = None
self.type = type
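# Usage sketch (not part of the generated model code): request a system-assigned
# identity; principal_id and tenant_id are read-only and remain None until the
# service populates them.
#   identity = VirtualMachineScaleSetIdentity(type='SystemAssigned')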
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/virtual_machine_scale_set_identity.py
|
Python
|
mit
| 1,774
|
from .TestContainersDeviceAndManager import TestContainerDeviceDataFlow
from .TestContainersReceivingSerialDataAndObserverPattern import TestContainersReceivingSerialDataAndObserverPattern
|
rCorvidae/OrionPI
|
src/tests/Devices/Containers/__init__.py
|
Python
|
mit
| 188
|
# downscale the prepped cmip5 data used in running the TEM model (IEM)
# author: Michael Lindgren
if __name__ == '__main__':
import glob, os, rasterio, itertools
from functools import partial
import downscale
from downscale import preprocess
import numpy as np
import argparse
# # parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-b", "--base_dir", action='store', dest='base_dir', type=str, help="base directory where data is stored in structured folders" )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
parser.add_argument( "-u", "--units", action='store', dest='units', type=str, help="cmip5 units name (exact)" )
parser.add_argument( "-met", "--metric", action='store', dest='metric', type=str, help="cmip5 metric name (exact)" )
parser.add_argument( "-lev", "--level", action='store', dest='level', type=int, help="optional level to extract for downscaling" )
parser.add_argument( "-levn", "--level_name", action='store', dest='level_name', type=str, help="name of level variable" )
args = parser.parse_args()
# unpack the args
variable = args.variable
scenario = args.scenario
model = args.model
units = args.units
metric = args.metric
base_dir = args.base_dir
level = args.level
level_name = args.level_name
if level is not None:
level = float( level )
# hardwired ARGS -- CMIP5
project = 'ar5'
interp = False
find_bounds = False
fix_clim = False
aoi_mask = None # for precip data only
anom = True # write out anoms (True) or not (False)
# # # FOR TESTING # # #
# base_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data'
# variable = 'clt'
# scenario = 'historical'
# model = 'GFDL-CM3'
# units = 'pct'
# metric = 'mean'
# level_name = None
# level = None
# # level = 1000 # mb / Pa
# # level_name = 'plev'
# # if level is not None:
# # level = float( level )
# # # # # # END TESTING # # #
# some setup args
base_path = os.path.join( base_dir,'cmip5','prepped' )
output_dir = os.path.join( base_dir, 'insolation_L48', 'downscaled_L48' )
variables = [ variable ]
scenarios = [ scenario ]
models = [ model ]
# modelnames is simply the string name to put in the output filenaming if that differs from the modelname
# used in querying the file which is the models list variable
all_models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4' ] # temp for distributed run
modelnames = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'NCAR-CCSM4' ]
modelnames = dict( zip( all_models, modelnames ) )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
os.chdir( output_dir )
for variable, model, scenario in itertools.product( variables, models, scenarios ):
modelname = modelnames[ model ]
# SETUP BASELINE
cru_cl20_varnames = {'hur':'reh', 'clt':'clt'} # we only support these variables for now...
clim_path = os.path.join( base_dir, 'insolation_L48', 'climatologies', cru_cl20_varnames[variable] )
filelist = glob.glob( os.path.join( clim_path, '*.tif' ) )
filelist = [ i for i in filelist if '_14_' not in i ] # remove the GD ANNUAL _14_ file.
baseline = downscale.Baseline( filelist )
input_path = os.path.join( base_path, model, scenario, variable )
output_path = os.path.join( output_dir, model, scenario, variable )
if not os.path.exists( output_path ):
os.makedirs( output_path )
print( input_path )
# list files for this set of downscaling -- one per folder
fn, = glob.glob( os.path.join( input_path, '*.nc' ) )
if 'historical' in scenario:
historical = downscale.Dataset( fn, variable, model, scenario, project=project, units=units,
metric=metric, begin=1900, end=2005, level_name=level_name, level=level )
future = None
else:
# get the historical data for anomalies
historical_fn, = glob.glob( os.path.join( os.path.dirname( fn ).replace( scenario, 'historical' ), '*.nc' ) )
historical = downscale.Dataset( historical_fn, variable, model, scenario, project=project, units=units,
metric=metric, begin=1900, end=2005, level_name=level_name, level=level )
future = downscale.Dataset( fn, variable, model, scenario, project=project, units=units, metric=metric,
begin=2006, end=2100, level_name=level_name, level=level )
# convert from Kelvin to Celsius
if variable == 'tas':
if historical:
historical.ds[ variable ] = historical.ds[ variable ] - 273.15
historical.ds[ variable ][ 'units' ] = units
if future:
future.ds[ variable ] = future.ds[ variable ] - 273.15
future.ds[ variable ][ 'units' ] = units
# DOWNSCALE
mask = rasterio.open( baseline.filelist[0] ).read_masks( 1 )
clim_begin = '1961'
clim_end = '1990'
if variable == 'pr':
rounder = np.rint
downscaling_operation = 'mult'
elif variable in ['hur','cld','clt']:
rounder = partial( np.round, decimals=1 )
downscaling_operation = 'mult'
else:
rounder = partial( np.round, decimals=1 )
downscaling_operation = 'add'
def round_it( x, mask ):
arr = np.ma.masked_array( data=x, mask=mask )
return rounder( arr )
round_data = partial( round_it, mask=( mask==0 ) )
def round_data_clamp_hur( x ):
x = round_data( x )
x[ x < 0.0 ] = 0.0
x[ x > 100.0 ] = 95.0 # per Stephanie McAfee
return x
def round_data_clamp_clt( x ):
x = round_data( x )
x[ x < 0.0 ] = 0.0
x[ x > 100.0 ] = 100.0 # per Stephanie McAfee
return x
if variable == 'hur':
post_downscale_function = round_data_clamp_hur
elif variable == 'clt':
post_downscale_function = round_data_clamp_clt
else:
post_downscale_function = round_data
ar5 = downscale.DeltaDownscale( baseline, clim_begin, clim_end, historical, future,
downscaling_operation=downscaling_operation, mask=mask, mask_value=0, ncpus=64,
src_crs={'init':'epsg:4326'}, src_nodata=None, dst_nodata=None,
post_downscale_function=post_downscale_function, varname=variable, modelname=modelname,
anom=anom, interp=interp, find_bounds=find_bounds, fix_clim=fix_clim, aoi_mask=aoi_mask )
ar5.downscale( output_dir=output_path )
|
ua-snap/downscale
|
snap_scripts/downscaling_L48/downscale_cmip5_L48.py
|
Python
|
mit
| 6,512
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Fortran/F08FILESUFFIXES.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
from common import write_fake_link
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
write_fake_link(test)
test.write('myfortran.py', r"""
import getopt
import sys
comment = '#' + sys.argv[1]
opts, args = getopt.getopt(sys.argv[2:], 'co:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:len(comment)] != comment:
outfile.write(l)
sys.exit(0)
""")
# Test default file suffixes: .f08/.F08 for F08
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
F08 = r'%(_python_)s myfortran.py f08',
FORTRAN = r'%(_python_)s myfortran.py fortran')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
env.Program(target = 'test09', source = 'test09.f08')
env.Program(target = 'test10', source = 'test10.F08')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#fortran\n")
test.write('test02.F', "This is a .F file.\n#link\n#fortranpp\n")
test.write('test03.for', "This is a .for file.\n#link\n#fortran\n")
test.write('test04.FOR', "This is a .FOR file.\n#link\n#fortranpp\n")
test.write('test05.ftn', "This is a .ftn file.\n#link\n#fortranpp\n")
test.write('test06.FTN', "This is a .FTN file.\n#link\n#fortranpp\n")
test.write('test07.fpp', "This is a .fpp file.\n#link\n#fortranpp\n")
test.write('test08.FPP', "This is a .FPP file.\n#link\n#fortranpp\n")
test.write('test09.f08', "This is a .f08 file.\n#link\n#f08\n")
test.write('test10.F08', "This is a .F08 file.\n#link\n#f08pp\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, "This is a .f file.\n")
test.must_match('test02' + _exe, "This is a .F file.\n")
test.must_match('test03' + _exe, "This is a .for file.\n")
test.must_match('test04' + _exe, "This is a .FOR file.\n")
test.must_match('test05' + _exe, "This is a .ftn file.\n")
test.must_match('test06' + _exe, "This is a .FTN file.\n")
test.must_match('test07' + _exe, "This is a .fpp file.\n")
test.must_match('test08' + _exe, "This is a .FPP file.\n")
test.must_match('test09' + _exe, "This is a .f08 file.\n")
test.must_match('test10' + _exe, "This is a .F08 file.\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
test/Fortran/F08FILESUFFIXES.py
|
Python
|
mit
| 4,028
|
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import unittest
from PySide2.QtCore import *
class MyTimer (QTimer):
def __init__(self):
QTimer.__init__(self)
self.startCalled = False
@Slot()
def slotUsedToIncreaseMethodOffset(self):
pass
class MyTimer2 (MyTimer):
@Slot()
def slotUsedToIncreaseMethodOffset2(self):
pass
def start(self):
self.startCalled = True
QCoreApplication.instance().quit()
class TestBug1019 (unittest.TestCase):
def testIt(self):
app = QCoreApplication([])
t = MyTimer2()
QTimer.singleShot(0, t.start)
app.exec_()
self.assertTrue(t.startCalled)
if __name__ == "__main__":
unittest.main()
|
qtproject/pyside-pyside
|
tests/QtCore/bug_1019.py
|
Python
|
lgpl-2.1
| 1,956
|
from stevedore import driver
#TODO: Put in the failure callback so that if
# you don't have larcv installed, it fails gracefully
def process(driver_name, file_names):
mgr = driver.DriverManager(
namespace='root2hdf5.plugins',
name=driver_name,
invoke_on_load=False,
)
mgr.driver.process(file_names)
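# Usage sketch (the plugin name and file list are hypothetical; the plugin must
# be registered under the 'root2hdf5.plugins' entry-point namespace):
#   process('larcv', ['events.root'])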
|
HEP-DL/root2hdf5
|
root2hdf5/framework/driver.py
|
Python
|
gpl-3.0
| 320
|
"""
Connection tools for managing different kinds of connections.
"""
import logging
import os
import shutil
import tempfile
import commands
from autotest.client import utils, os_dep
from virttest import propcan, remote, utils_libvirtd
from virttest import data_dir, aexpect
class ConnectionError(Exception):
"""
The base error in connection.
"""
pass
class ConnForbiddenError(ConnectionError):
"""
Error in forbidden operation.
"""
def __init__(self, detail):
ConnectionError.__init__(self)
self.detail = detail
def __str__(self):
return ('Operation is forbidden.\n'
'Message: %s' % self.detail)
class ConnCopyError(ConnectionError):
"""
Error in copying a file.
"""
def __init__(self, src_path, dest_path):
ConnectionError.__init__(self)
self.src_path = src_path
self.dest_path = dest_path
def __str__(self):
return ('Copy file from %s to %s failed.'
% (self.src_path, self.dest_path))
class ConnNotImplementedError(ConnectionError):
"""
Error in calling unimplemented method
"""
def __init__(self, method_type, class_type):
ConnectionError.__init__(self)
self.method_type = method_type
self.class_type = class_type
def __str__(self):
return ('Method %s is not implemented in class %s\n'
% (self.method_type, self.class_type))
class ConnLoginError(ConnectionError):
"""
Error in login.
"""
def __init__(self, dest, detail):
ConnectionError.__init__(self)
self.dest = dest
self.detail = detail
def __str__(self):
return ("Got a error when login to %s.\n"
"Error: %s\n" % (self.dest, self.detail))
class ConnToolNotFoundError(ConnectionError):
"""
Error raised when a required tool is not found.
"""
def __init__(self, tool, detail):
ConnectionError.__init__(self)
self.tool = tool
self.detail = detail
def __str__(self):
return ("Got a error when access the tool (%s).\n"
"Error: %s\n" % (self.tool, self.detail))
class ConnSCPError(ConnectionError):
"""
Error in SCP.
"""
def __init__(self, src_ip, src_path, dest_ip, dest_path, detail):
ConnectionError.__init__(self)
self.src_ip = src_ip
self.src_path = src_path
self.dest_ip = dest_ip
self.dest_path = dest_path
self.detail = detail
def __str__(self):
return ("Failed scp from %s on %s to %s on %s.\n"
"error: %s.\n" %
(self.src_path, self.src_ip, self.dest_path,
self.dest_ip, self.detail))
class SSHCheckError(ConnectionError):
"""
Base Error in check of SSH connection.
"""
def __init__(self, server_ip, output):
ConnectionError.__init__(self)
self.server_ip = server_ip
self.output = output
def __str__(self):
return ("SSH to %s failed.\n"
"output: %s " % (self.server_ip, self.output))
class SSHRmAuthKeysError(ConnectionError):
"""
Error in removing authorized_keys file.
"""
def __init__(self, auth_keys, output):
ConnectionError.__init__(self)
self.auth_keys = auth_keys
self.output = output
def __str__(self):
return ("Failed to remove authorized_keys file (%s).\n"
"output: %s .\n" % (self.auth_keys, self.output))
class ConnCmdClientError(ConnectionError):
"""
Error in executing cmd on client.
"""
def __init__(self, cmd, output):
ConnectionError.__init__(self)
self.cmd = cmd
self.output = output
def __str__(self):
return ("Execute command '%s' on client failed.\n"
"output: %s" % (self.cmd, self.output))
class ConnPrivKeyError(ConnectionError):
"""
Error in building private key with certtool command.
"""
def __init__(self, key, output):
ConnectionError.__init__(self)
self.key = key
self.output = output
def __str__(self):
return ("Failed to build private key file (%s).\n"
"output: %s .\n" % (self.key, self.output))
class ConnCertError(ConnectionError):
"""
Error in building certificate file with certtool command.
"""
def __init__(self, cert, output):
ConnectionError.__init__(self)
self.cert = cert
self.output = output
def __str__(self):
return ("Failed to build certificate file (%s).\n"
"output: %s .\n" % (self.cert, self.output))
class ConnRmCertError(ConnectionError):
"""
Error in removing certificate file with rm command.
"""
def __init__(self, cert, output):
ConnectionError.__init__(self)
self.cert = cert
self.output = output
def __str__(self):
return ("Failed to remove certificate file/path (%s).\n"
"output: %s .\n" % (self.cert, self.output))
class ConnMkdirError(ConnectionError):
"""
Error in making directory.
"""
def __init__(self, directory, output):
ConnectionError.__init__(self)
self.directory = directory
self.output = output
def __str__(self):
return ("Failed to make directory %s \n"
"output: %s.\n" % (self.directory, self.output))
class ConnServerRestartError(ConnectionError):
"""
Error in restarting libvirtd on server.
"""
def __init__(self, output):
ConnectionError.__init__(self)
self.output = output
def __str__(self):
return ("Failed to restart libvirtd service on server.\n"
"output: %s.\n" % (self.output))
class ConnectionBase(propcan.PropCanBase):
"""
Base class of a connection between server and client.
A connection is built from the client to the server, and ConnectionBase
holds the relevant information about both ends.
"""
__slots__ = ('server_ip', 'server_user', 'server_pwd',
'client_ip', 'client_user', 'client_pwd',
'server_session', 'client_session',
'tmp_dir', 'auto_recover')
def __init__(self, *args, **dargs):
"""
Initialize instance with server info and client info.
:param server_ip: Ip of server.
:param server_user: Username to login server.
:param server_pwd: Password for server_user.
:param client_ip: IP of client.
:param client_user: Username to login client.
:param client_pwd: Password for client_user.
:param server_session: Session to server and execute command on
server.
:param client_session: Session to client and execute command on
client.
:param tmp_dir: A tmp dir to store some tmp file.
:param auto_recover: If False (the default), conn_recover() will not
be called by __del__().
If True, the Connection class will call
conn_recover() in __del__(), so the user need not
call it manually. Errors raised in conn_recover()
will then be ignored.
Example:
::
connection = ConnectionBase(server_ip=server_ip,
server_user=server_user,
server_pwd=server_pwd,
client_ip=client_ip,
client_user=client_user,
client_pwd=client_pwd)
connection.conn_setup()
virsh.connect(URI)
connection.conn_recover()
We suggest *not* to pass auto_recover=True to __init__(),
and call conn_recover() manually when you don't need this
connection any more.
"""
init_dict = dict(*args, **dargs)
init_dict['server_ip'] = init_dict.get('server_ip', 'SERVER.IP')
init_dict['server_user'] = init_dict.get('server_user', 'root')
init_dict['server_pwd'] = init_dict.get('server_pwd', None)
init_dict['client_ip'] = init_dict.get('client_ip', 'CLIENT.IP')
init_dict['client_user'] = init_dict.get('client_user', 'root')
init_dict['client_pwd'] = init_dict.get('client_pwd', None)
init_dict['auto_recover'] = init_dict.get('auto_recover', False)
super(ConnectionBase, self).__init__(init_dict)
self.__dict_set__('client_session', None)
self.__dict_set__('server_session', None)
# make a tmp dir as a workspace
tmp_dir = tempfile.mkdtemp(dir=data_dir.get_tmp_dir())
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
self.tmp_dir = tmp_dir
def __del__(self):
"""
Clean up any leftover sessions and tmp_dir.
"""
self.close_session()
if self.auto_recover:
try:
self.conn_recover()
except ConnNotImplementedError:
pass
tmp_dir = self.tmp_dir
if (tmp_dir is not None) and (os.path.exists(tmp_dir)):
shutil.rmtree(tmp_dir)
def close_session(self):
"""
If some session exists, close it down.
"""
session_list = ['client_session', 'server_session']
for session_name in session_list:
session = self.__dict_get__(session_name)
if session is not None:
session.close()
else:
continue
def conn_setup(self):
"""
To be implemented by subclasses.
"""
raise ConnNotImplementedError('conn_setup', self.__class__)
def conn_check(self):
"""
To be implemented by subclasses.
"""
raise ConnNotImplementedError('conn_check', self.__class__)
def conn_recover(self):
"""
To be implemented by subclasses.
"""
raise ConnNotImplementedError('conn_recover', self.__class__)
def _new_client_session(self):
"""
Build a new client session.
"""
transport = 'ssh'
host = self.client_ip
port = 22
username = self.client_user
password = self.client_pwd
prompt = r"[\#\$]\s*$"
try:
client_session = remote.wait_for_login(transport, host, port,
username, password, prompt)
except remote.LoginTimeoutError:
raise ConnLoginError("Got a timeout error when login to client.")
except remote.LoginAuthenticationError:
raise ConnLoginError("Authentication failed to login to client.")
except remote.LoginProcessTerminatedError:
raise ConnLoginError("Host terminates during login to client.")
except remote.LoginError:
raise ConnLoginError("Some error occurs login to client failed.")
return client_session
def get_client_session(self):
"""
If the client session exists, return it;
otherwise create a session to the client and set client_session.
"""
client_session = self.__dict_get__('client_session')
if (client_session is not None) and (client_session.is_alive()):
return client_session
else:
client_session = self._new_client_session()
self.__dict_set__('client_session', client_session)
return client_session
def set_client_session(self, value):
"""
Set client session to value.
"""
if value:
message = "Forbid to set client_session to %s." % value
else:
message = "Forbid to set client_session."
raise ConnForbiddenError(message)
def del_client_session(self):
"""
Delete client session.
"""
raise ConnForbiddenError('Forbid to del client_session')
def _new_server_session(self):
"""
Build a new server session.
"""
transport = 'ssh'
host = self.server_ip
port = 22
username = self.server_user
password = self.server_pwd
prompt = r"[\#\$]\s*$"
try:
server_session = remote.wait_for_login(transport, host, port,
username, password, prompt)
except remote.LoginTimeoutError:
raise ConnLoginError("Got a timeout error when login to server.")
except remote.LoginAuthenticationError:
raise ConnLoginError("Authentication failed to login to server.")
except remote.LoginProcessTerminatedError:
raise ConnLoginError("Host terminates during login to server.")
except remote.LoginError:
raise ConnLoginError("Some error occurs login to client server.")
return server_session
def get_server_session(self):
"""
If the server session exists, return it;
otherwise create a session to the server and set server_session.
"""
server_session = self.__dict_get__('server_session')
if (server_session is not None) and (server_session.is_alive()):
return server_session
else:
server_session = self._new_server_session()
self.__dict_set__('server_session', server_session)
return server_session
def set_server_session(self, value=None):
"""
Set server session to value.
"""
if value:
message = "Forbid to set server_session to %s." % value
else:
message = "Forbid to set server_session."
raise ConnForbiddenError(message)
def del_server_session(self):
"""
Delete server session.
"""
raise ConnForbiddenError('Forbid to del server_session')
class SSHConnection(ConnectionBase):
"""
Connection of SSH transport.
Some specific variables in SSHConnection class.
ssh_rsa_pub_path: Path of id_rsa.pub, default is /root/.ssh/id_rsa.pub.
ssh_id_rsa_path: Path of id_rsa, default is /root/.ssh/id_rsa.
SSH_KEYGEN, SSH_ADD, SSH_COPY_ID, SSH_AGENT, SHELL, SSH: tools to build
a non-pwd connection.
"""
__slots__ = ('ssh_rsa_pub_path', 'ssh_id_rsa_path', 'SSH_KEYGEN',
'SSH_ADD', 'SSH_COPY_ID', 'SSH_AGENT', 'SHELL', 'SSH')
def __init__(self, *args, **dargs):
"""
Initialization of SSH connection.
(1). Call __init__ of class ConnectionBase.
(2). Initialize tools will be used in conn setup.
"""
init_dict = dict(*args, **dargs)
init_dict['ssh_rsa_pub_path'] = init_dict.get('ssh_rsa_pub_path',
'/root/.ssh/id_rsa.pub')
init_dict['ssh_id_rsa_path'] = init_dict.get('ssh_id_rsa_path',
'/root/.ssh/id_rsa')
super(SSHConnection, self).__init__(init_dict)
# set the tool for ssh setup.
tool_dict = {'SSH_KEYGEN': 'ssh-keygen',
'SSH_ADD': 'ssh-add',
'SSH_COPY_ID': 'ssh-copy-id',
'SSH_AGENT': 'ssh-agent',
'SHELL': 'sh',
'SSH': 'ssh'}
for key in tool_dict:
toolName = tool_dict[key]
try:
tool = os_dep.command(toolName)
except ValueError:
logging.debug("%s executable not set or found on path,"
"some function of connection will fail.",
toolName)
tool = '/bin/true'
self.__dict_set__(key, tool)
def conn_check(self):
"""
Check the SSH connection.
(1).Initialize some variables.
(2).execute ssh command to check conn.
"""
client_session = self.client_session
server_user = self.server_user
server_ip = self.server_ip
ssh = self.SSH
        if ssh == '/bin/true':
            raise ConnToolNotFoundError('ssh',
                                        "executable not set or found on path.")
cmd = "%s %s@%s exit 0" % (ssh, server_user, server_ip)
try:
client_session.cmd(cmd, timeout=5)
except aexpect.ShellError, detail:
client_session.close()
raise SSHCheckError(server_ip, detail)
logging.debug("Check the SSH to %s OK.", server_ip)
def conn_recover(self):
"""
        Clean up the authentication setup on the server host.
"""
# initialize variables
server_ip = self.server_ip
server_user = self.server_user
server_pwd = self.server_pwd
client_ip = self.client_ip
ssh_authorized_keys_path = '/root/.ssh/authorized_keys'
cmd = "rm -rf %s" % ssh_authorized_keys_path
server_session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
# remove authentication file
status, output = server_session.cmd_status_output(cmd)
if status:
raise SSHRmAuthKeysError(ssh_authorized_keys_path, output)
# restart libvirtd service on server
try:
libvirtd_service = utils_libvirtd.Libvirtd(session=server_session)
libvirtd_service.restart()
server_session.close()
except (remote.LoginError, aexpect.ShellError), detail:
server_session.close()
raise ConnServerRestartError(detail)
logging.debug("SSH authentication recover successfully.")
def conn_setup(self):
"""
Setup of SSH connection.
(1).Initialization of some variables.
(2).Check tools.
(3).Initialization of id_rsa.
        (4).Set up an ssh-agent.
        (5).Copy the public key to the server.
"""
client_session = self.client_session
ssh_rsa_pub_path = self.ssh_rsa_pub_path
ssh_id_rsa_path = self.ssh_id_rsa_path
server_user = self.server_user
server_ip = self.server_ip
server_pwd = self.server_pwd
ssh_keygen = self.SSH_KEYGEN
ssh_add = self.SSH_ADD
ssh_copy_id = self.SSH_COPY_ID
ssh_agent = self.SSH_AGENT
shell = self.SHELL
tool_dict = {'ssh_keygen': ssh_keygen,
'ssh_add': ssh_add,
'ssh_copy_id': ssh_copy_id,
'ssh_agent': ssh_agent,
'shell': shell}
for tool_name in tool_dict:
tool = tool_dict[tool_name]
            if tool == '/bin/true':
                raise ConnToolNotFoundError(tool_name,
                                            "executable not set or found on path.")
if os.path.exists("/root/.ssh/id_rsa"):
pass
else:
cmd = "%s -t rsa -f /root/.ssh/id_rsa -N '' " % (ssh_keygen)
status, output = client_session.cmd_status_output(cmd)
if status:
raise ConnCmdClientError(cmd, output)
cmd = "%s %s" % (ssh_agent, shell)
status, output = client_session.cmd_status_output(cmd)
if status:
raise ConnCmdClientError(cmd, output)
cmd = "%s %s" % (ssh_add, ssh_id_rsa_path)
status, output = client_session.cmd_status_output(cmd)
if status:
raise ConnCmdClientError(cmd, output)
cmd = "%s -i %s %s@%s" % (ssh_copy_id, ssh_rsa_pub_path,
server_user, server_ip)
client_session.sendline(cmd)
try:
remote.handle_prompts(client_session, server_user,
server_pwd, prompt=r"[\#\$]\s*$")
except remote.LoginError, detail:
raise ConnCmdClientError(cmd, detail)
client_session.close()
logging.debug("SSH connection setup successfully.")
class TCPConnection(ConnectionBase):
"""
Connection class for TCP transport.
Some specific variables for TCPConnection class.
"""
__slots__ = ('tcp_port', 'remote_syslibvirtd',
'remote_libvirtdconf', 'sasl_allowed_users',
'auth_tcp', 'listen_addr')
def __init__(self, *args, **dargs):
"""
init params for TCP connection and init tmp_dir.
:param tcp_port: Port of tcp connection, default is 16509.
:param sysconfig_libvirtd_path: Path of libvirtd file, default is
``/etc/sysconfig/libvirtd``.
:param libvirtd_conf_path: Path of libvirtd.conf, default is
``/etc/libvirt/libvirtd.conf``.
"""
init_dict = dict(*args, **dargs)
init_dict['tcp_port'] = init_dict.get('tcp_port', '16509')
init_dict['auth_tcp'] = init_dict.get('auth_tcp', 'none')
init_dict['listen_addr'] = init_dict.get('listen_addr')
init_dict['sasl_allowed_users'] = init_dict.get('sasl_allowed_users')
super(TCPConnection, self).__init__(init_dict)
self.remote_syslibvirtd = remote.RemoteFile(
address=self.server_ip,
client='scp',
username=self.server_user,
password=self.server_pwd,
port='22',
remote_path='/etc/sysconfig/libvirtd')
self.remote_libvirtdconf = remote.RemoteFile(
address=self.server_ip,
client='scp',
username=self.server_user,
password=self.server_pwd,
port='22',
remote_path='/etc/libvirt/libvirtd.conf')
def conn_recover(self):
"""
Clean up for TCP connection.
(1).initialize variables.
(2).Delete the RemoteFile.
(3).restart libvirtd on server.
"""
# initialize variables
server_ip = self.server_ip
server_user = self.server_user
server_pwd = self.server_pwd
# delete the RemoteFile object to recover remote file.
del self.remote_syslibvirtd
del self.remote_libvirtdconf
# restart libvirtd service on server
try:
session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
libvirtd_service = utils_libvirtd.Libvirtd(session=session)
libvirtd_service.restart()
except (remote.LoginError, aexpect.ShellError), detail:
raise ConnServerRestartError(detail)
logging.debug("TCP connection recover successfully.")
def conn_setup(self):
"""
        Enable TCP connections to libvirtd on the server.
(1).initialization for variables.
(2).edit /etc/sysconfig/libvirtd on server.
(3).edit /etc/libvirt/libvirtd.conf on server.
(4).restart libvirtd service on server.
"""
# initialize variables
server_ip = self.server_ip
server_user = self.server_user
server_pwd = self.server_pwd
tcp_port = self.tcp_port
auth_tcp = self.auth_tcp
# require a list data type
sasl_allowed_users = self.sasl_allowed_users
listen_addr = self.listen_addr
# edit the /etc/sysconfig/libvirtd to add --listen args in libvirtd
pattern2repl = {r".*LIBVIRTD_ARGS\s*=\s*\"\s*--listen\s*\".*":
"LIBVIRTD_ARGS=\"--listen\""}
self.remote_syslibvirtd.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf
# listen_tcp=1, tcp_port=$tcp_port, auth_tcp="none"
# listen_tcp=1, tcp_port=$tcp_port, auth_tcp=$auth_tcp
pattern2repl = {r".*listen_tls\s*=.*": 'listen_tls=0',
r".*listen_tcp\s*=.*": 'listen_tcp=1',
r".*tcp_port\s*=.*": 'tcp_port="%s"' % (tcp_port),
r".*auth_tcp\s*=.*": 'auth_tcp="%s"' % (auth_tcp)}
        # sasl_allowed_users is a whitelist of allowed SASL usernames (a list).
        # If the list is empty, no client can connect.
if sasl_allowed_users:
pattern2repl[r".*sasl_allowed_username_list\s*=.*"] = \
'sasl_allowed_username_list=%s' % (sasl_allowed_users)
if listen_addr:
pattern2repl[r".*listen_addr\s*=.*"] = \
"listen_addr='%s'" % (listen_addr)
self.remote_libvirtdconf.sub_else_add(pattern2repl)
# restart libvirtd service on server
try:
session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
libvirtd_service = utils_libvirtd.Libvirtd(session=session)
libvirtd_service.restart()
except (remote.LoginError, aexpect.ShellError), detail:
raise ConnServerRestartError(detail)
logging.debug("TCP connection setup successfully.")
class TLSConnection(ConnectionBase):
"""
Connection of TLS transport.
Some specific variables for TLSConnection class.
server_cn, client_cn, ca_cn: Info to build pki key.
    CERTTOOL: tool to build keys for TLS connection.
pki_CA_dir: Dir to store CA key.
libvirt_pki_dir, libvirt_pki_private_dir: Dir to store pki in libvirt.
sysconfig_libvirtd_path, libvirtd_conf_path: Path of libvirt config file.
hosts_path: /etc/hosts
auth_tls, tls_port, listen_addr: custom TLS Auth, port and listen address
    tls_allowed_dn_list: list of DNs to be checked
tls_verify_cert: disable verification, default is to always verify
tls_sanity_cert: disable checks, default is to always run sanity checks
custom_pki_path: custom pki path
    ca_cakey_path: CA certificate path, needed when reusing a previous cert
    scp_new_cacert: copy new CA certificate, default is to always copy
restart_libvirtd: default is to restart libvirtd
"""
__slots__ = ('server_cn', 'client_cn', 'ca_cn', 'CERTTOOL', 'pki_CA_dir',
'libvirt_pki_dir', 'libvirt_pki_private_dir', 'client_hosts',
'server_libvirtdconf', 'server_syslibvirtd', 'auth_tls',
'tls_port', 'listen_addr', 'tls_allowed_dn_list',
'custom_pki_path', 'tls_verify_cert', 'tls_sanity_cert',
'ca_cakey_path', 'scp_new_cacert', 'restart_libvirtd')
def __init__(self, *args, **dargs):
"""
Initialization of TLSConnection.
(1).call the init func in ConnectionBase.
(2).check and set CERTTOOL.
(3).make a tmp directory as a workspace.
(4).set values of pki related.
"""
init_dict = dict(*args, **dargs)
init_dict['server_cn'] = init_dict.get('server_cn', 'TLSServer')
init_dict['client_cn'] = init_dict.get('client_cn', 'TLSClient')
init_dict['ca_cn'] = init_dict.get('ca_cn', 'AUTOTEST.VIRT')
init_dict['ca_cakey_path'] = init_dict.get('ca_cakey_path', None)
init_dict['auth_tls'] = init_dict.get('auth_tls', 'none')
init_dict['tls_port'] = init_dict.get('tls_port', '16514')
init_dict['listen_addr'] = init_dict.get('listen_addr')
init_dict['custom_pki_path'] = init_dict.get('custom_pki_path')
init_dict['tls_verify_cert'] = init_dict.get('tls_verify_cert', 'yes')
init_dict['tls_sanity_cert'] = init_dict.get('tls_sanity_cert', 'yes')
init_dict['tls_allowed_dn_list'] = init_dict.get('tls_allowed_dn_list')
init_dict['scp_new_cacert'] = init_dict.get('scp_new_cacert', 'yes')
init_dict['restart_libvirtd'] = init_dict.get('restart_libvirtd', 'yes')
super(TLSConnection, self).__init__(init_dict)
# check and set CERTTOOL in slots
try:
CERTTOOL = os_dep.command("certtool")
except ValueError:
logging.warning("certtool executable not set or found on path, "
"TLS connection will not setup normally")
CERTTOOL = '/bin/true'
self.CERTTOOL = CERTTOOL
# set some pki related dir values
if not self.custom_pki_path:
self.pki_CA_dir = ('/etc/pki/CA/')
self.libvirt_pki_dir = ('/etc/pki/libvirt/')
self.libvirt_pki_private_dir = ('/etc/pki/libvirt/private/')
else:
# set custom certifications path
dir_dict = {'CA': 'pki_CA_dir',
'libvirt': 'libvirt_pki_dir',
'libvirt/private': 'libvirt_pki_private_dir'}
if not os.path.exists(self.custom_pki_path):
os.makedirs(self.custom_pki_path)
for dir_name in dir_dict:
setattr(self, dir_dict[dir_name], self.custom_pki_path)
self.client_hosts = remote.RemoteFile(address=self.client_ip,
client='scp',
username=self.client_user,
password=self.client_pwd,
port='22',
remote_path='/etc/hosts')
self.server_syslibvirtd = remote.RemoteFile(
address=self.server_ip,
client='scp',
username=self.server_user,
password=self.server_pwd,
port='22',
remote_path='/etc/sysconfig/libvirtd')
self.server_libvirtdconf = remote.RemoteFile(
address=self.server_ip,
client='scp',
username=self.server_user,
password=self.server_pwd,
port='22',
remote_path='/etc/libvirt/libvirtd.conf')
def conn_recover(self):
"""
Do the clean up work.
(1).initialize variables.
(2).Delete remote file.
(3).Restart libvirtd on server.
"""
        # clean up certificates first
if self.auto_recover:
self.cert_recover()
# initialize variables
server_ip = self.server_ip
server_user = self.server_user
server_pwd = self.server_pwd
del self.client_hosts
del self.server_syslibvirtd
del self.server_libvirtdconf
# restart libvirtd service on server
try:
session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
libvirtd_service = utils_libvirtd.Libvirtd(session=session)
libvirtd_service.restart()
except (remote.LoginError, aexpect.ShellError), detail:
raise ConnServerRestartError(detail)
logging.debug("TLS connection recover successfully.")
def cert_recover(self):
"""
        Do the certificate cleanup work.
        (1).initialize variables.
        (2).Delete locally and remotely generated certificate files.
"""
# initialize variables
server_ip = self.server_ip
server_user = self.server_user
server_pwd = self.server_pwd
cert_dict = {'CA': '%s*' % self.pki_CA_dir,
'cert': self.libvirt_pki_dir,
'key': self.libvirt_pki_private_dir}
        # remove locally generated certificate files
for cert in cert_dict:
cert_path = cert_dict[cert]
cmd = "rm -rf %s" % cert_path
if os.path.exists(cert_path):
shutil.rmtree(cert_path)
else:
status, output = commands.getstatusoutput(cmd)
if status:
raise ConnRmCertError(cert_path, output)
        # remove remotely generated certificate files
server_session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
for cert in cert_dict:
cert_path = cert_dict[cert]
cmd = "rm -rf %s" % cert_path
status, output = server_session.cmd_status_output(cmd)
if status:
raise ConnRmCertError(cert_path, output)
server_session.close()
logging.debug("TLS certifications recover successfully.")
def conn_setup(self, server_setup=True, client_setup=True):
"""
        Set up a TLS connection between server and client.
        First check that the certtool needed for the setup is available,
        then call the setup functions to complete the connection setup.
"""
if self.CERTTOOL == '/bin/true':
raise ConnToolNotFoundError('certtool',
"certtool executable not set or found on path.")
        # support building multiple CAs with different CA CNs
        build_CA(self.tmp_dir, self.ca_cn, self.ca_cakey_path, self.CERTTOOL)
        # the CA, client and server do not always need to be set up together
if server_setup:
self.server_setup()
if client_setup:
self.client_setup()
self.close_session()
logging.debug("TLS connection setup successfully.")
def server_setup(self):
"""
setup private key and certificate file for server.
(1).initialization for variables.
(2).build server key.
(3).copy files to server.
(4).edit /etc/sysconfig/libvirtd on server.
(5).edit /etc/libvirt/libvirtd.conf on server.
(6).restart libvirtd service on server.
"""
# initialize variables
tmp_dir = self.tmp_dir
scp_new_cacert = self.scp_new_cacert
# sometimes, need to reuse previous CA cert
if self.ca_cakey_path and scp_new_cacert == 'no':
cacert_path = '%s/cacert.pem' % self.ca_cakey_path
else:
cacert_path = '%s/cacert.pem' % tmp_dir
serverkey_path = '%s/serverkey.pem' % tmp_dir
servercert_path = '%s/servercert.pem' % tmp_dir
server_ip = self.server_ip
server_user = self.server_user
server_pwd = self.server_pwd
auth_tls = self.auth_tls
tls_port = self.tls_port
listen_addr = self.listen_addr
restart_libvirtd = self.restart_libvirtd
tls_allowed_dn_list = self.tls_allowed_dn_list
pki_path = self.custom_pki_path
tls_verify_cert = self.tls_verify_cert
tls_sanity_cert = self.tls_sanity_cert
# build a server key.
build_server_key(tmp_dir, self.ca_cakey_path,
self.server_cn, self.CERTTOOL)
# scp cacert.pem, servercert.pem and serverkey.pem to server.
server_session = self.server_session
cmd = "mkdir -p %s" % self.libvirt_pki_private_dir
status, output = server_session.cmd_status_output(cmd)
if status:
raise ConnMkdirError(self.libvirt_pki_private_dir, output)
scp_dict = {cacert_path: self.pki_CA_dir,
servercert_path: self.libvirt_pki_dir,
serverkey_path: self.libvirt_pki_private_dir}
for key in scp_dict:
local_path = key
remote_path = scp_dict[key]
try:
remote.copy_files_to(server_ip, 'scp', server_user,
server_pwd, '22', local_path, remote_path)
except remote.SCPError, detail:
raise ConnSCPError('AdminHost', local_path,
server_ip, remote_path, detail)
# edit the /etc/sysconfig/libvirtd to add --listen args in libvirtd
pattern2repl = {r".*LIBVIRTD_ARGS\s*=\s*\"\s*--listen\s*\".*":
"LIBVIRTD_ARGS=\"--listen\""}
self.server_syslibvirtd.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add listen_tls=1
pattern2repl = {r".*listen_tls\s*=\s*.*": "listen_tls=1"}
self.server_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add
# listen_addr=$listen_addr
if listen_addr:
pattern2repl[r".*listen_addr\s*=.*"] = \
"listen_addr='%s'" % (listen_addr)
self.server_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add auth_tls=$auth_tls
if auth_tls != 'none':
pattern2repl = {r".*auth_tls\s*=\s*.*": 'auth_tls="%s"' % auth_tls}
self.server_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add tls_port=$tls_port
if tls_port != '16514':
pattern2repl = {r".*tls_port\s*=\s*.*": 'tls_port="%s"' % tls_port}
self.server_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add
# tls_allowed_dn_list=$tls_allowed_dn_list
if isinstance(tls_allowed_dn_list, list):
pattern2repl = {r".*tls_allowed_dn_list\s*=\s*.*":
'tls_allowed_dn_list=%s' % tls_allowed_dn_list}
self.server_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to override
# the default server certification file path
if pki_path:
cert_path_dict = {'ca_file': cacert_path,
'key_file': serverkey_path,
'cert_file': servercert_path}
pattern2repl = {}
for cert_name in cert_path_dict:
cert_file = os.path.basename(cert_path_dict[cert_name])
abs_cert_file = os.path.join(pki_path, cert_file)
pattern2repl[r".*%s\s*=.*" % (cert_name)] = \
'%s="%s"' % (cert_name, abs_cert_file)
self.server_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to disable client verification
if tls_verify_cert == "no":
pattern2repl = {r".*tls_no_verify_certificate\s*=\s*.*":
'tls_no_verify_certificate=1'}
self.server_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to disable server sanity checks
if tls_sanity_cert == "no":
pattern2repl = {r".*tls_no_sanity_certificate\s*=\s*.*":
'tls_no_sanity_certificate=1'}
self.server_libvirtdconf.sub_else_add(pattern2repl)
# restart libvirtd service on server
if restart_libvirtd == "yes":
try:
session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
libvirtd_service = utils_libvirtd.Libvirtd(session=session)
libvirtd_service.restart()
except (remote.LoginError, aexpect.ShellError), detail:
raise ConnServerRestartError(detail)
def client_setup(self):
"""
setup private key and certificate file for client.
(1).initialization for variables.
(2).build a key for client.
(3).copy files to client.
(4).edit /etc/hosts on client.
"""
# initialize variables
tmp_dir = self.tmp_dir
cacert_path = '%s/cacert.pem' % tmp_dir
clientkey_path = '%s/clientkey.pem' % tmp_dir
clientcert_path = '%s/clientcert.pem' % tmp_dir
client_ip = self.client_ip
client_user = self.client_user
client_pwd = self.client_pwd
# build a client key.
build_client_key(tmp_dir, self.client_cn, self.CERTTOOL)
# scp cacert.pem, clientcert.pem and clientkey.pem to client.
client_session = self.client_session
cmd = "mkdir -p %s" % self.libvirt_pki_private_dir
status, output = client_session.cmd_status_output(cmd)
if status:
raise ConnMkdirError(self.libvirt_pki_private_dir, output)
scp_dict = {cacert_path: self.pki_CA_dir,
clientcert_path: self.libvirt_pki_dir,
clientkey_path: self.libvirt_pki_private_dir}
for key in scp_dict:
local_path = key
remote_path = scp_dict[key]
try:
remote.copy_files_to(client_ip, 'scp', client_user,
client_pwd, '22', local_path, remote_path)
except remote.SCPError, detail:
raise ConnSCPError('AdminHost', local_path,
client_ip, remote_path, detail)
# edit /etc/hosts on client
pattern2repl = {r".*%s.*" % self.server_cn:
"%s %s" % (self.server_ip, self.server_cn)}
self.client_hosts.sub_else_add(pattern2repl)
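# A hypothetical usage sketch for TLSConnection (not part of the original
# module). server_cn, client_cn and ca_cn fall back to 'TLSServer',
# 'TLSClient' and 'AUTOTEST.VIRT'; the certificates are generated with
# certtool in tmp_dir by the helper functions defined below.
#
#     params = {'server_ip': '192.168.122.10', 'server_user': 'root',
#               'server_pwd': 'password', 'client_ip': '192.168.122.11',
#               'client_user': 'root', 'client_pwd': 'password'}
#     conn = TLSConnection(params)
#     conn.conn_setup()    # build CA/server/client certs and install them
#     # ... exercise the qemu+tls://TLSServer/system URI here ...
#     conn.conn_recover()  # remove certs, restore configs, restart libvirtd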
def build_client_key(tmp_dir, client_cn="TLSClient", certtool="certtool"):
"""
(1).initialization for variables.
(2).make a private key with certtool command.
(3).prepare a info file.
(4).make a certificate file with certtool command.
"""
# Initialize variables
cakey_path = '%s/cakey.pem' % tmp_dir
cacert_path = '%s/cacert.pem' % tmp_dir
clientkey_path = '%s/clientkey.pem' % tmp_dir
clientcert_path = '%s/clientcert.pem' % tmp_dir
clientinfo_path = '%s/client.info' % tmp_dir
# make a private key.
cmd = "%s --generate-privkey > %s" % (certtool, clientkey_path)
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise ConnPrivKeyError(clientkey_path, cmd_result.stderr)
    # prepare an info file to build clientcert.
clientinfo_file = open(clientinfo_path, "w")
clientinfo_file.write("organization = AUTOTEST.VIRT\n")
clientinfo_file.write("cn = %s\n" % (client_cn))
clientinfo_file.write("tls_www_client\n")
clientinfo_file.write("encryption_key\n")
clientinfo_file.write("signing_key\n")
clientinfo_file.close()
# make a client certificate file and a client key file.
cmd = ("%s --generate-certificate --load-privkey %s \
--load-ca-certificate %s --load-ca-privkey %s \
--template %s --outfile %s" %
(certtool, clientkey_path, cacert_path,
cakey_path, clientinfo_path, clientcert_path))
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise ConnCertError(clientinfo_path, cmd_result.stderr)
def build_server_key(tmp_dir, ca_cakey_path=None,
server_cn="TLSServer", certtool="certtool"):
"""
(1).initialization for variables.
(2).make a private key with certtool command.
(3).prepare a info file.
(4).make a certificate file with certtool command.
"""
# initialize variables
# sometimes, need to reuse previous CA cert
if not ca_cakey_path:
cakey_path = '%s/cakey.pem' % tmp_dir
else:
cakey_path = '%s/cakey.pem' % ca_cakey_path
serverkey_path = '%s/serverkey.pem' % tmp_dir
cacert_path = '%s/cacert.pem' % tmp_dir
servercert_path = '%s/servercert.pem' % tmp_dir
serverinfo_path = '%s/server.info' % tmp_dir
# make a private key
cmd = "%s --generate-privkey > %s" % (certtool, serverkey_path)
cmd_result = utils.run(cmd, ignore_status=True)
if cmd_result.exit_status:
raise ConnPrivKeyError(serverkey_path, cmd_result.stderr)
    # prepare an info file to build servercert and serverkey
serverinfo_file = open(serverinfo_path, "w")
serverinfo_file.write("organization = AUTOTEST.VIRT\n")
serverinfo_file.write("cn = %s\n" % (server_cn))
serverinfo_file.write("tls_www_server\n")
serverinfo_file.write("encryption_key\n")
serverinfo_file.write("signing_key\n")
serverinfo_file.close()
# make a server certificate file and a server key file
cmd = ("%s --generate-certificate --load-privkey %s \
--load-ca-certificate %s --load-ca-privkey %s \
--template %s --outfile %s" %
(certtool, serverkey_path, cacert_path,
cakey_path, serverinfo_path, servercert_path))
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise ConnCertError(serverinfo_path, cmd_result.stderr)
def build_CA(tmp_dir, cn="AUTOTEST.VIRT", ca_cakey_path=None, certtool="certtool"):
"""
    Set up the private key and certificate file which are needed to build
    certificate files for client and server.
    (1).initialization for variables.
    (2).make a private key with certtool command.
    (3).prepare an info file.
(4).make a certificate file with certtool command.
"""
# initialize variables
if not ca_cakey_path:
cakey_path = '%s/cakey.pem' % tmp_dir
else:
cakey_path = '%s/cakey.pem' % ca_cakey_path
cainfo_path = '%s/ca.info' % tmp_dir
cacert_path = '%s/cacert.pem' % tmp_dir
# make a private key
# sometimes, may reuse previous CA cert, so don't always need to
# generate private key
if not ca_cakey_path:
cmd = "%s --generate-privkey > %s " % (certtool, cakey_path)
cmd_result = utils.run(cmd, ignore_status=True, timeout=10)
if cmd_result.exit_status:
raise ConnPrivKeyError(cakey_path, cmd_result.stderr)
    # prepare an info file to build the certificate file
cainfo_file = open(cainfo_path, "w")
cainfo_file.write("cn = %s\n" % cn)
cainfo_file.write("ca\n")
cainfo_file.write("cert_signing_key\n")
cainfo_file.close()
# make a certificate file to build clientcert and servercert
cmd = ("%s --generate-self-signed --load-privkey %s\
--template %s --outfile %s" %
(certtool, cakey_path, cainfo_path, cacert_path))
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise ConnCertError(cainfo_path, cmd_result.stderr)
class UNIXConnection(ConnectionBase):
"""
Connection class for UNIX transport.
Some specific variables for UNIXConnection class.
"""
__slots__ = ('auth_unix_ro', 'auth_unix_rw', 'unix_sock_dir',
'unix_sock_group', 'unix_sock_ro_perms',
'unix_sock_rw_perms', 'access_drivers',
'client_ip', 'client_user', 'client_pwd',
'client_libvirtdconf', 'restart_libvirtd')
def __init__(self, *args, **dargs):
"""
init params for UNIX connection.
:param auth_unix_ro: UNIX R/O sockets, default is 'none'.
:param auth_unix_rw: UNIX R/W sockets, default is 'none'.
:param unix_sock_group: UNIX domain socket group ownership,
default is 'libvirt'.
:param access_drivers: access control restrictions,
default is '["polkit"]'.
:param unix_sock_ro_perms: UNIX socket permissions for the
R/O socket, default is '0777'.
:param unix_sock_rw_perms: UNIX socket permissions for the
R/W socket, default is '0770'.
:param client_libvirtdconf: Path of client libvirtd.conf, default is
'/etc/libvirt/libvirtd.conf'.
:param restart_libvirtd: default is to restart libvirtd.
"""
init_dict = dict(*args, **dargs)
init_dict['auth_unix_ro'] = init_dict.get('auth_unix_ro', 'none')
init_dict['auth_unix_rw'] = init_dict.get('auth_unix_rw', 'none')
init_dict['unix_sock_dir'] = init_dict.get('unix_sock_dir', '/var/run/libvirt')
init_dict['unix_sock_group'] = init_dict.get('unix_sock_group', 'libvirt')
init_dict['access_drivers'] = init_dict.get('access_drivers', ["polkit"])
init_dict['unix_sock_ro_perms'] = init_dict.get('unix_sock_ro_perms', '0777')
init_dict['unix_sock_rw_perms'] = init_dict.get('unix_sock_rw_perms', '0770')
init_dict['restart_libvirtd'] = init_dict.get('restart_libvirtd', 'yes')
super(UNIXConnection, self).__init__(init_dict)
self.client_libvirtdconf = remote.RemoteFile(
address=self.client_ip,
client='scp',
username=self.client_user,
password=self.client_pwd,
port='22',
remote_path='/etc/libvirt/libvirtd.conf')
def conn_recover(self):
"""
Do the clean up work.
(1).Delete remote file.
        (2).Restart libvirtd on client.
"""
del self.client_libvirtdconf
        # restart libvirtd service on client
client_session = self.client_session
try:
libvirtd_service = utils_libvirtd.Libvirtd(session=client_session)
libvirtd_service.restart()
except (remote.LoginError, aexpect.ShellError), detail:
raise ConnServerRestartError(detail)
logging.debug("UNIX connection recover successfully.")
def conn_setup(self):
"""
Setup a UNIX connection.
(1).Initialize variables.
(2).Update libvirtd.conf configuration.
(3).Restart libvirtd on client.
"""
# initialize variables
auth_unix_ro = self.auth_unix_ro
auth_unix_rw = self.auth_unix_rw
unix_sock_group = self.unix_sock_group
unix_sock_dir = self.unix_sock_dir
unix_sock_ro_perms = self.unix_sock_ro_perms
unix_sock_rw_perms = self.unix_sock_rw_perms
access_drivers = self.access_drivers
restart_libvirtd = self.restart_libvirtd
client_session = self.client_session
# edit the /etc/libvirt/libvirtd.conf to add auth_unix_ro arg
if auth_unix_ro:
pattern2repl = {r".*auth_unix_ro\s*=.*":
'auth_unix_ro="%s"' % auth_unix_ro}
self.client_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add auth_unix_rw arg
if auth_unix_rw:
pattern2repl = {r".*auth_unix_rw\s*=.*":
'auth_unix_rw="%s"' % auth_unix_rw}
self.client_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add unix_sock_group arg
if unix_sock_group != 'libvirt':
pattern2repl = {r".*unix_sock_group\s*=.*":
'unix_sock_group="%s"' % unix_sock_group}
self.client_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add unix_sock_dir arg
if unix_sock_dir != '/var/run/libvirt':
pattern2repl = {r".*unix_sock_dir\s*=.*":
'unix_sock_dir="%s"' % unix_sock_dir}
self.client_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add access_drivers arg
if access_drivers != ["polkit"]:
pattern2repl = {r".*access_drivers\s*=.*":
'access_drivers="%s"' % access_drivers}
self.client_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add unix_sock_ro_perms arg
if unix_sock_ro_perms:
pattern2repl = {r".*unix_sock_ro_perms\s*=.*":
'unix_sock_ro_perms="%s"' % unix_sock_ro_perms}
self.client_libvirtdconf.sub_else_add(pattern2repl)
# edit the /etc/libvirt/libvirtd.conf to add unix_sock_rw_perms arg
if unix_sock_rw_perms:
pattern2repl = {r".*unix_sock_rw_perms\s*=.*":
'unix_sock_rw_perms="%s"' % unix_sock_rw_perms}
self.client_libvirtdconf.sub_else_add(pattern2repl)
        # restart libvirtd service on client
if restart_libvirtd == "yes":
try:
libvirtd_service = utils_libvirtd.Libvirtd(session=client_session)
libvirtd_service.restart()
except (remote.LoginError, aexpect.ShellError), detail:
raise ConnServerRestartError(detail)
logging.debug("UNIX connection setup successfully.")
|
autotest/virt-test
|
virttest/utils_conn.py
|
Python
|
gpl-2.0
| 52,295
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class WebPubSubManagementClientConfiguration(Configuration):
"""Configuration for WebPubSubManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: Gets subscription Id which uniquely identifies the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(WebPubSubManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2021-10-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-webpubsub/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
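# A minimal, hypothetical usage sketch (not part of the generated file). This
# configuration class is normally constructed by the management client itself,
# so direct instantiation is mainly useful for inspecting the default policies;
# azure-identity is assumed to be available for the credential.
#
#     from azure.identity import DefaultAzureCredential
#     config = WebPubSubManagementClientConfiguration(
#         credential=DefaultAzureCredential(),
#         subscription_id="00000000-0000-0000-0000-000000000000",
#     )
#     assert config.api_version == "2021-10-01"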
|
Azure/azure-sdk-for-python
|
sdk/webpubsub/azure-mgmt-webpubsub/azure/mgmt/webpubsub/_configuration.py
|
Python
|
mit
| 3,388
|
# Copyright (c) 2019, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc('stock', 'doctype', 'purchase_receipt')
frappe.reload_doc('stock', 'doctype', 'purchase_receipt_item')
frappe.reload_doc('stock', 'doctype', 'delivery_note')
frappe.reload_doc('stock', 'doctype', 'delivery_note_item')
def update_from_return_docs(doctype):
for return_doc in frappe.get_all(doctype, filters={'is_return' : 1, 'docstatus' : 1}):
# Update original receipt/delivery document from return
return_doc = frappe.get_cached_doc(doctype, return_doc.name)
return_doc.update_prevdoc_status()
return_against = frappe.get_doc(doctype, return_doc.return_against)
return_against.update_billing_status()
# Set received qty in stock uom in PR, as returned qty is checked against it
frappe.db.sql(""" update `tabPurchase Receipt Item`
set received_stock_qty = received_qty * conversion_factor
where docstatus = 1 """)
for doctype in ('Purchase Receipt', 'Delivery Note'):
update_from_return_docs(doctype)
|
saurabh6790/erpnext
|
erpnext/patches/v13_0/update_returned_qty_in_pr_dn.py
|
Python
|
gpl-3.0
| 1,127
|
#!/usr/bin/env python
# Modified from http://www.pygame.org/project-Very+simple+Pong+game-816-.html
import pygame
import os
from pygame.locals import *
import pygame.surfarray as surfarray
position = 5, 325
os.environ['SDL_VIDEO_WINDOW_POS'] = str(position[0]) + "," + str(position[1])
pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
# Creating 2 bars, a ball and background.
back = pygame.Surface((640, 480))
background = back.convert()
background.fill((0, 0, 0))
bar = pygame.Surface((10,50))
bar1 = bar.convert()
bar1.fill((0, 255, 255))
bar2 = bar.convert()
bar2.fill((255, 255, 255))
circ_sur = pygame.Surface((15, 15))
circ = pygame.draw.circle(circ_sur,(255,255,255),(15//2,15//2),15//2)
circle = circ_sur.convert()
circle.set_colorkey((0,0,0))
font = pygame.font.SysFont("calibri",40)
ai_speed = 15.
HIT_REWARD = 0
LOSE_REWARD = -1
SCORE_REWARD = 1
class GameState(object):
def __init__(self):
self.bar1_x, self.bar2_x = 10. , 620.
self.bar1_y, self.bar2_y = 215. , 215.
self.circle_x, self.circle_y = 307.5, 232.5
self.bar1_move, self.bar2_move = 0. , 0.
self.bar1_score, self.bar2_score = 0,0
self.speed_x, self.speed_y = 7., 7.
def frame_step(self,input_vect):
pygame.event.pump()
reward = 0
if sum(input_vect) != 1:
raise ValueError('Multiple input actions!')
if input_vect[1] == 1: # Key up
self.bar1_move = -ai_speed
elif input_vect[2] == 1: # Key down
self.bar1_move = ai_speed
else: # don't move
self.bar1_move = 0
self.score1 = font.render(str(self.bar1_score), True,(255,255,255))
self.score2 = font.render(str(self.bar2_score), True,(255,255,255))
screen.blit(background,(0,0))
frame = pygame.draw.rect(screen,(255,255,255),Rect((5,5),(630,470)),2)
middle_line = pygame.draw.aaline(screen,(255,255,255),(330,5),(330,475))
screen.blit(bar1,(self.bar1_x,self.bar1_y))
screen.blit(bar2,(self.bar2_x,self.bar2_y))
screen.blit(circle,(self.circle_x,self.circle_y))
screen.blit(self.score1,(250.,210.))
screen.blit(self.score2,(380.,210.))
self.bar1_y += self.bar1_move
        # AI of the computer paddle: track the ball.
        if not self.bar2_y == self.circle_y + 7.5:
            if self.bar2_y < self.circle_y + 7.5:
                self.bar2_y += ai_speed
            if self.bar2_y > self.circle_y - 42.5:
                self.bar2_y -= ai_speed
        else:
            self.bar2_y = self.circle_y + 7.5
# bounds of movement
if self.bar1_y >= 420.: self.bar1_y = 420.
elif self.bar1_y <= 10. : self.bar1_y = 10.
if self.bar2_y >= 420.: self.bar2_y = 420.
elif self.bar2_y <= 10.: self.bar2_y = 10.
        # simple paddle collision handling (no proper collision detection)
if self.circle_x <= self.bar1_x + 10.:
if self.circle_y >= self.bar1_y - 7.5 and self.circle_y <= self.bar1_y + 42.5:
self.circle_x = 20.
self.speed_x = -self.speed_x
reward = HIT_REWARD
if self.circle_x >= self.bar2_x - 15.:
if self.circle_y >= self.bar2_y - 7.5 and self.circle_y <= self.bar2_y + 42.5:
self.circle_x = 605.
self.speed_x = -self.speed_x
# scoring
if self.circle_x < 5.:
self.bar2_score += 1
reward = LOSE_REWARD
self.circle_x, self.circle_y = 320., 232.5
            self.bar1_y, self.bar2_y = 215., 215.
elif self.circle_x > 620.:
self.bar1_score += 1
reward = SCORE_REWARD
self.circle_x, self.circle_y = 307.5, 232.5
self.bar1_y, self.bar2_y = 215., 215.
# collisions on sides
if self.circle_y <= 10.:
self.speed_y = -self.speed_y
self.circle_y = 10.
elif self.circle_y >= 457.5:
self.speed_y = -self.speed_y
self.circle_y = 457.5
self.circle_x += self.speed_x
self.circle_y += self.speed_y
image_data = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
terminal = False
if max(self.bar1_score, self.bar2_score) >= 20:
self.bar1_score = 0
self.bar2_score = 0
terminal = True
return image_data, reward, terminal
def vectorFromAction(n_actions, action_index):
import numpy as np
a = np.zeros(n_actions)
a[action_index] = 1
return a
class PongEnv(object):
def __init__(self):
from gym import spaces
self.game_state = GameState()
self.action_space = spaces.Discrete(3)
def reset(self):
x_t, _, _ = self.game_state.frame_step(
vectorFromAction(self.action_space.n, 0))
return x_t
def render(self):
pass
def step(self, action_index):
x_t1, r_t, terminal = self.game_state.frame_step(
vectorFromAction(self.action_space.n, action_index))
return x_t1, r_t, terminal, 0
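# A hypothetical driver loop for PongEnv (not part of the original file). The
# environment returns a raw RGB frame, a reward and a terminal flag, so a
# random agent can be run like this:
#
#     import random
#     env = PongEnv()
#     frame = env.reset()
#     for _ in range(1000):
#         action = random.randrange(env.action_space.n)
#         frame, reward, terminal, _ = env.step(action)
#         if terminal:
#             frame = env.reset()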
|
akashin/RL_Experiments
|
DQN/games/pong.py
|
Python
|
mit
| 5,187
|
from msl.equipment.connection import Connection
from msl.equipment.connection_demo import ConnectionDemo
from msl.equipment.record_types import EquipmentRecord
from msl.equipment.resources.picotech.picoscope.picoscope import PicoScope
from msl.equipment.resources.picotech.picoscope.channel import PicoScopeChannel
class MyConnection(Connection):
def __init__(self, record):
super(MyConnection, self).__init__(record)
def get_none1(self):
"""No return type is specified."""
pass
def get_none2(self, channel):
"""This function takes 1 input but returns nothing.
Parameters
----------
channel : :obj:`str`
Some channel number
"""
pass
def get_bool1(self):
""":obj:`bool`: A boolean value."""
pass
def get_bool2(self):
"""Returns a boolean value.
Returns
-------
:obj:`bool`
A boolean value.
"""
pass
def get_string1(self):
""":obj:`str`: A string value."""
pass
def get_string2(self):
"""Returns a string value.
Returns
-------
:obj:`str`
A string value.
"""
pass
def get_bytes1(self):
""":obj:`bytes`: A bytes value."""
pass
def get_bytes2(self):
"""Returns a bytes value.
Returns
-------
:obj:`bytes`
A bytes value.
"""
pass
def get_int1(self):
""":obj:`int`: An integer value."""
pass
def get_int2(self):
"""Returns an integer value.
Returns
-------
:obj:`int`
An integer value.
"""
pass
def get_float1(self):
""":obj:`float`: A floating-point value."""
pass
def get_float2(self):
"""Returns a floating-point value.
Returns
-------
:obj:`float`
A floating-point value.
"""
pass
def get_list_of_bool1(self):
""":obj:`list` of :obj:`bool`: A list of boolean values."""
pass
def get_list_of_bool2(self):
"""A list of boolean values.
Returns
-------
:obj:`list` of :obj:`bool`
A list of boolean values.
"""
pass
def get_list_of_str1(self):
""":obj:`list` of :obj:`str`: A list of string values."""
pass
def get_list_of_str2(self):
"""A list of string values.
Returns
-------
:obj:`list` of :obj:`str`
A list of string values.
"""
pass
def get_list_of_bytes1(self):
""":obj:`list` of :obj:`bytes`: A list of bytes values."""
pass
def get_list_of_bytes2(self):
"""A list of bytes values.
Returns
-------
:obj:`list` of :obj:`bytes`
A list of bytes values.
"""
pass
def get_list_of_int1(self):
""":obj:`list` of :obj:`int`: A list of integer values."""
pass
def get_list_of_int2(self):
"""A list of integer values.
Returns
-------
:obj:`list` of :obj:`int`
A list of integer values.
"""
pass
def get_list_of_float1(self):
""":obj:`list` of :obj:`float`: A list of floating-point values."""
pass
def get_list_of_float2(self):
"""A list of floating-point values.
Returns
-------
:obj:`list` of :obj:`float`
A list of floating-point values.
"""
pass
def get_dict_of_bool1(self):
""":obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
pass
def get_dict_of_bool2(self):
"""A dictionary of boolean values.
Returns
-------
:obj:`dict` of :obj:`bool`
A dictionary of boolean values.
"""
pass
def get_dict_of_str1(self):
""":obj:`dict` of :obj:`str`: A dictionary of string values."""
pass
def get_dict_of_str2(self):
"""A dictionary of string values.
Returns
-------
:obj:`dict` of :obj:`str`
A dictionary of string values.
"""
pass
def get_dict_of_bytes1(self):
""":obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
pass
def get_dict_of_bytes2(self):
"""A dictionary of bytes values.
Returns
-------
:obj:`dict` of :obj:`bytes`
A dictionary of bytes values.
"""
pass
def get_dict_of_int1(self):
""":obj:`dict` of :obj:`int`: A dictionary of integer values."""
pass
def get_dict_of_int2(self):
"""A dictionary of integer values.
Returns
-------
:obj:`dict` of :obj:`int`
A dictionary of integer values.
"""
pass
def get_dict_of_float1(self):
""":obj:`dict` of :obj:`float`: A dictionary of floating-point values."""
pass
def get_dict_of_float2(self):
"""A dictionary of floating-point values.
Returns
-------
:obj:`dict` of :obj:`float`
A dictionary of floating-point values.
"""
pass
def get_multiple1(self):
"""Many different data types.
Returns
-------
:obj:`str`
A string value.
:obj:`float`
A floating-point value.
:obj:`float`
A floating-point value.
:obj:`dict` of :obj:`int`
A dictionary of integer values.
:obj:`bytes`
A bytes value.
"""
pass
def test_return_type_builtin():
demo = ConnectionDemo(EquipmentRecord(), MyConnection)
assert demo.get_none1() is None
assert demo.get_none2() is None
assert isinstance(demo.get_bool1(), bool)
assert isinstance(demo.get_bool2(), bool)
assert isinstance(demo.get_string1(), str)
assert isinstance(demo.get_string2(), str)
assert isinstance(demo.get_bytes1(), bytes)
assert isinstance(demo.get_bytes2(), bytes)
assert isinstance(demo.get_int1(), int)
assert isinstance(demo.get_int2(), int)
assert isinstance(demo.get_float1(), float)
assert isinstance(demo.get_float2(), float)
x = demo.get_list_of_bool1()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_bool2()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_str1()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_str2()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_bytes1()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_bytes2()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_int1()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_int2()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_float1()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_list_of_float2()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_dict_of_bool1()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_bool2()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_str1()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_str2()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_bytes1()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_bytes2()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_int1()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_int2()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_float1()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_dict_of_float2()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_multiple1()
assert len(x) == 5
assert isinstance(x[0], str)
assert isinstance(x[1], float)
assert isinstance(x[2], float)
assert isinstance(x[3], dict) and isinstance(x[3]['demo'], int)
assert isinstance(x[4], bytes)
def test_return_type_object():
scope = ConnectionDemo(EquipmentRecord(), PicoScope)
x = scope.channel()
assert isinstance(x, dict) and x['demo'] == PicoScopeChannel
|
MSLNZ/msl-equipment
|
tests/test_connection_demo.py
|
Python
|
mit
| 8,679
|
from tests.test_pip import (reset_env, run_pip,
_create_test_package, _change_test_package_version)
from tests.local_repos import local_checkout
def test_install_editable_from_git_with_https():
"""
Test cloning from Git with https.
"""
reset_env()
result = run_pip('install', '-e',
'%s#egg=pip-test-package' %
local_checkout('git+https://github.com/pypa/pip-test-package.git'),
expect_error=True)
result.assert_installed('pip-test-package', with_files=['.git'])
def test_git_with_sha1_revisions():
"""
Git backend should be able to install from SHA1 revisions
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
_change_test_package_version(env, version_pkg_path)
sha1 = env.run('git', 'rev-parse', 'HEAD~1', cwd=version_pkg_path).stdout.strip()
run_pip('install', '-e', '%s@%s#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/'), sha1))
version = env.run('version_pkg')
assert '0.1' in version.stdout, version.stdout
def test_git_with_branch_name_as_revision():
"""
Git backend should be able to install from branch names
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'checkout', '-b', 'test_branch', expect_stderr=True, cwd=version_pkg_path)
_change_test_package_version(env, version_pkg_path)
run_pip('install', '-e', '%s@test_branch#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/')))
version = env.run('version_pkg')
assert 'some different version' in version.stdout
def test_git_with_tag_name_as_revision():
"""
Git backend should be able to install from tag names
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'tag', 'test_tag', expect_stderr=True, cwd=version_pkg_path)
_change_test_package_version(env, version_pkg_path)
run_pip('install', '-e', '%s@test_tag#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/')))
version = env.run('version_pkg')
assert '0.1' in version.stdout
def test_git_with_tag_name_and_update():
"""
Test cloning a git repository and updating to a different version.
"""
reset_env()
result = run_pip('install', '-e', '%s#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
result.assert_installed('pip-test-package', with_files=['.git'])
result = run_pip('install', '--global-option=--version', '-e',
'%s@0.1.1#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
assert '0.1.1\n' in result.stdout
def test_git_branch_should_not_be_changed():
"""
Editable installations should not change branch
related to issue #32 and #161
"""
env = reset_env()
run_pip('install', '-e', '%s#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
source_dir = env.venv_path/'src'/'pip-test-package'
result = env.run('git', 'branch', cwd=source_dir)
assert '* master' in result.stdout, result.stdout
def test_git_with_non_editable_unpacking():
"""
Test cloning a git repository from a non-editable URL with a given tag.
"""
reset_env()
result = run_pip('install', '--global-option=--version', local_checkout(
'git+http://github.com/pypa/pip-test-package.git@0.1.1#egg=pip-test-package'
), expect_error=True)
assert '0.1.1\n' in result.stdout
def test_git_with_editable_where_egg_contains_dev_string():
"""
Test cloning a git repository from an editable url which contains "dev" string
"""
reset_env()
result = run_pip('install', '-e', '%s#egg=django-devserver' %
local_checkout('git+git://github.com/dcramer/django-devserver.git'))
result.assert_installed('django-devserver', with_files=['.git'])
def test_git_with_non_editable_where_egg_contains_dev_string():
"""
Test cloning a git repository from a non-editable url which contains "dev" string
"""
env = reset_env()
result = run_pip('install', '%s#egg=django-devserver' %
local_checkout('git+git://github.com/dcramer/django-devserver.git'))
devserver_folder = env.site_packages/'devserver'
assert devserver_folder in result.files_created, str(result)
def test_git_with_ambiguous_revs():
"""
Test git with two "names" (tag/branch) pointing to the same commit
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
package_url = 'git+file://%s@0.1#egg=version_pkg' % (version_pkg_path.abspath.replace('\\', '/'))
env.run('git', 'tag', '0.1', cwd=version_pkg_path)
result = run_pip('install', '-e', package_url)
assert 'Could not find a tag or branch' not in result.stdout
# it is 'version-pkg' instead of 'version_pkg' because
# egg-link name is version-pkg.egg-link because it is a single .py module
result.assert_installed('version-pkg', with_files=['.git'])
|
evnpr/herokupython
|
vendor/pip-1.2.1/tests/test_vcs_backends.py
|
Python
|
mit
| 5,343
|
import os, sys
origin_dir = 'del_201304now/'
new_dir = 'freq_event_state/'
files = os.listdir(origin_dir)
state_dir = {}
country_dir = {}
for file in files:
with open(origin_dir + file) as f:
event_dir = {}
for line in f:
tmp_content = line.split('\t')
code = tmp_content[4]
location = tmp_content[14]
tmp_loc = location.split(',')
length = len(tmp_loc)
state = ''
if length == 3:
state = tmp_loc[1]
elif length == 2:
state = tmp_loc[0]
else:
continue
country = tmp_loc[length-1]
if country not in country_dir:
country_dir[country] = {}
if state in country_dir[country]:
tmp_dir = country_dir[country][state]
if code in tmp_dir:
tmp_dir[code] += 1
else:
tmp_dir[code] = 1
else:
country_dir[country][state] = {}
country_dir[country][state][code] = 1
for country_name,countries in country_dir.items():
for state_name, states in countries.items():
dir_path = '%s%s/%s/'%(new_dir, country_name, state_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(dir_path+file, 'a') as writer:
for event, freq in states.items():
writer.write(event+': '+str(freq)+'\n')
|
moment-of-peace/EventForecast
|
association_rule/event_frequent.py
|
Python
|
lgpl-3.0
| 1,535
|
#!/usr/bin/env python
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import ipaddr
from maas_common import get_auth_ref
from maas_common import get_keystone_client
from maas_common import get_nova_client
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
# The actual stat names from novaclient are nasty, so this mapping is used to
# translate them to something more consistent and usable, as well as set the
# units for each metric
stats_mapping = {
'hypervisor_count': {
'stat_name': 'count', 'unit': 'hypervisors', 'type': 'uint32'
},
'total_disk_space': {
'stat_name': 'local_gb', 'unit': 'Gigabytes', 'type': 'uint32'
},
'used_disk_space': {
'stat_name': 'local_gb_used', 'unit': 'Gigabytes', 'type': 'uint32'
},
'free_disk_space': {
'stat_name': 'free_disk_gb', 'unit': 'Gigabytes', 'type': 'uint32'
},
'total_memory': {
'stat_name': 'memory_mb', 'unit': 'Megabytes', 'type': 'uint32'
},
'used_memory': {
'stat_name': 'memory_mb_used', 'unit': 'Megabytes', 'type': 'uint32'
},
'free_memory': {
'stat_name': 'free_ram_mb', 'unit': 'Megabytes', 'type': 'uint32'
},
'total_vcpus': {
'stat_name': 'vcpus', 'unit': 'vcpu', 'type': 'uint32'
},
'used_vcpus': {
'stat_name': 'vcpus_used', 'unit': 'vcpu', 'type': 'uint32'
}
}
def check(auth_ref, args):
keystone = get_keystone_client(auth_ref)
tenant_id = keystone.tenant_id
COMPUTE_ENDPOINT = (
'{protocol}://{ip}:8774/v2.1/{tenant_id}'
.format(ip=args.ip, tenant_id=tenant_id, protocol=args.protocol)
)
try:
if args.ip:
nova = get_nova_client(bypass_url=COMPUTE_ENDPOINT)
else:
nova = get_nova_client()
except Exception as e:
metric_bool('client_success', False, m_name='maas_nova')
status_err(str(e), m_name='maas_nova')
else:
metric_bool('client_success', True, m_name='maas_nova')
# get some cloud stats
stats = nova.hypervisor_stats.statistics()
cloud_stats = collections.defaultdict(dict)
for metric_name, vals in stats_mapping.iteritems():
multiplier = 1
if metric_name == 'total_vcpus':
multiplier = args.cpu_allocation_ratio
elif metric_name == 'total_memory':
multiplier = args.mem_allocation_ratio
cloud_stats[metric_name]['value'] = \
(getattr(stats, vals['stat_name']) * multiplier)
cloud_stats[metric_name]['unit'] = \
vals['unit']
cloud_stats[metric_name]['type'] = \
vals['type']
status_ok(m_name='maas_nova')
for metric_name in cloud_stats.iterkeys():
metric('cloud_resource_%s' % metric_name,
cloud_stats[metric_name]['type'],
cloud_stats[metric_name]['value'],
cloud_stats[metric_name]['unit'])
def main(args):
auth_ref = get_auth_ref()
check(auth_ref, args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Check Nova hypervisor stats')
parser.add_argument('--cpu',
type=float,
default=1.0,
required=False,
action='store',
dest='cpu_allocation_ratio',
help='cpu allocation ratio')
parser.add_argument('--mem',
type=float,
default=1.0,
required=False,
action='store',
dest='mem_allocation_ratio',
help='mem allocation ratio')
parser.add_argument('ip', nargs='?',
type=ipaddr.IPv4Address,
help='Nova API IP address')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
parser.add_argument('--protocol',
type=str,
help='Protocol to use for contacting nova',
default='http')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
main(args)
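# Hypothetical invocation examples (not part of the original plugin). The flag
# names come from the argparse definitions above; the IP address is
# illustrative only.
#
#     python nova_cloud_stats.py 172.29.236.100 --protocol http \
#         --cpu 16.0 --mem 1.5 --telegraf-output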
|
briancurtin/rpc-maas
|
playbooks/files/rax-maas/plugins/nova_cloud_stats.py
|
Python
|
apache-2.0
| 5,067
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
test_records = frappe.get_test_records('Brand')
|
BhupeshGupta/erpnext
|
erpnext/setup/doctype/brand/test_brand.py
|
Python
|
agpl-3.0
| 233
|
# coding=utf-8
"""
The Collector class is a base class for all metric collectors.
"""
import os
import socket
import platform
import logging
import configobj
import traceback
import time
from diamond.metric import Metric
# Detect the architecture of the system and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
MAX_COUNTER = (2 ** 64) - 1
else:
MAX_COUNTER = (2 ** 32) - 1
def get_hostname(config, method=None):
"""
Returns a hostname as configured by the user
"""
if 'hostname' in config:
return config['hostname']
if method is None:
if 'hostname_method' in config:
method = config['hostname_method']
else:
method = 'smart'
# case insensitive method
method = method.lower()
if method in get_hostname.cached_results:
return get_hostname.cached_results[method]
if method == 'smart':
hostname = get_hostname(config, 'fqdn_short')
if hostname != 'localhost':
get_hostname.cached_results[method] = hostname
return hostname
hostname = get_hostname(config, 'hostname_short')
get_hostname.cached_results[method] = hostname
return hostname
if method == 'fqdn_short':
hostname = socket.getfqdn().split('.')[0]
get_hostname.cached_results[method] = hostname
return hostname
if method == 'fqdn':
hostname = socket.getfqdn().replace('.', '_')
get_hostname.cached_results[method] = hostname
return hostname
if method == 'fqdn_rev':
hostname = socket.getfqdn().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
return hostname
if method == 'uname_short':
hostname = os.uname()[1].split('.')[0]
get_hostname.cached_results[method] = hostname
return hostname
if method == 'uname_rev':
hostname = os.uname()[1].split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
return hostname
if method == 'hostname':
hostname = socket.gethostname()
get_hostname.cached_results[method] = hostname
return hostname
if method == 'hostname_short':
hostname = socket.gethostname().split('.')[0]
get_hostname.cached_results[method] = hostname
return hostname
if method == 'hostname_rev':
hostname = socket.gethostname().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
return hostname
if method == 'none':
get_hostname.cached_results[method] = None
return None
raise NotImplementedError(config['hostname_method'])
get_hostname.cached_results = {}
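# Illustrative note (added here, not part of the original module): when neither
# 'hostname' nor 'hostname_method' is configured, get_hostname() falls back to
# the 'smart' method, i.e. the short FQDN, and only retries with
# 'hostname_short' when that resolves to 'localhost'. A hypothetical config such
# as {'hostname_method': 'fqdn_rev'} would turn 'www.example.com' into
# 'com.example.www'.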
def str_to_bool(value):
"""
Converts string ('true', 'false') to bool
"""
if isinstance(value, basestring):
if value.strip().lower() == 'true':
return True
else:
return False
return value
class Collector(object):
"""
The Collector class is a base class for all metric collectors.
"""
def __init__(self, config, handlers):
"""
Create a new instance of the Collector class
"""
# Initialize Logger
self.log = logging.getLogger('diamond')
# Initialize Members
self.name = self.__class__.__name__
self.handlers = handlers
self.last_values = {}
# Get Collector class
cls = self.__class__
# Initialize config
self.config = configobj.ConfigObj()
# Check if default config is defined
if self.get_default_config() is not None:
# Merge default config
self.config.merge(self.get_default_config())
# Merge default Collector config
self.config.merge(config['collectors']['default'])
# Check if Collector config section exists
if cls.__name__ in config['collectors']:
# Merge Collector config section
self.config.merge(config['collectors'][cls.__name__])
# Check for config file in config directory
configfile = os.path.join(config['server']['collectors_config_path'],
cls.__name__) + '.conf'
if os.path.exists(configfile):
# Merge Collector config file
self.config.merge(configobj.ConfigObj(configfile))
# Handle some config file changes transparently
if isinstance(self.config['byte_unit'], basestring):
self.config['byte_unit'] = self.config['byte_unit'].split()
self.config['enabled'] = str_to_bool(self.config['enabled'])
self.config['measure_collector_time'] = str_to_bool(
self.config['measure_collector_time'])
self.collect_running = False
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this collector
"""
return {
'enabled': 'Enable collecting these metrics',
'byte_unit': 'Default numeric output(s)',
'measure_collector_time': 'Collect the collector run time in ms',
}
def get_default_config(self):
"""
Return the default config for the collector
"""
return {
### Defaults options for all Collectors
# Uncomment and set to hardcode a hostname for the collector path
# Keep in mind, periods are separators in graphite
# 'hostname': 'my_custom_hostname',
# If you prefer to just use a different way of calculating the
# hostname
# Uncomment and set this to one of these values:
# fqdn_short = Default. Similar to hostname -s
# fqdn = hostname output
# fqdn_rev = hostname in reverse (com.example.www)
# uname_short = Similar to uname -n, but only the first part
# uname_rev = uname -n in reverse (com.example.www)
# 'hostname_method': 'fqdn_short',
# All collectors are disabled by default
'enabled': False,
# Path Prefix
'path_prefix': 'servers',
# Path Prefix for Virtual Machine metrics
'instance_prefix': 'instances',
# Path Suffix
'path_suffix': '',
# Default splay time (seconds)
'splay': 1,
# Default Poll Interval (seconds)
'interval': 300,
# Default collector threading model
'method': 'Sequential',
# Default numeric output
'byte_unit': 'byte',
# Collect the collector run time in ms
'measure_collector_time': False,
}
def get_stats_for_upload(self, config=None):
if config is None:
config = self.config
stats = {}
if 'enabled' in config:
stats['enabled'] = config['enabled']
else:
stats['enabled'] = False
if 'interval' in config:
stats['interval'] = config['interval']
return stats
def get_schedule(self):
"""
Return schedule for the collector
"""
# Return a dict of tuples containing (collector function,
# collector function args, splay, interval)
return {self.__class__.__name__: (self._run,
None,
int(self.config['splay']),
int(self.config['interval']))}
def get_metric_path(self, name, instance=None):
"""
Get metric path.
Instance indicates that this is a metric for a
virtual machine and should have a different
root prefix.
"""
if 'path' in self.config:
path = self.config['path']
else:
path = self.__class__.__name__
if instance is not None:
if 'instance_prefix' in self.config:
prefix = self.config['instance_prefix']
else:
prefix = 'instances'
if path == '.':
return '.'.join([prefix, instance, name])
else:
return '.'.join([prefix, instance, path, name])
if 'path_prefix' in self.config:
prefix = self.config['path_prefix']
else:
prefix = 'systems'
if 'path_suffix' in self.config:
suffix = self.config['path_suffix']
else:
suffix = None
hostname = get_hostname(self.config)
if hostname is not None:
if prefix:
prefix = ".".join((prefix, hostname))
else:
prefix = hostname
# if there is a suffix, add after the hostname
if suffix:
prefix = '.'.join((prefix, suffix))
if path == '.':
return '.'.join([prefix, name])
else:
return '.'.join([prefix, path, name])
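# Illustrative note (added here, not part of the original module): with the
# defaults above, a collector class named CPUCollector on host "web01"
# publishing the metric "user" yields the path "servers.web01.CPUCollector.user";
# passing instance="vm1" yields "instances.vm1.CPUCollector.user" instead.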
def get_hostname(self):
return get_hostname(self.config)
def collect(self):
"""
Default collector method
"""
raise NotImplementedError()
def publish(self, name, value, raw_value=None, precision=0,
metric_type='GAUGE', instance=None):
"""
Publish a metric with the given name
"""
# Get metric Path
path = self.get_metric_path(name, instance=instance)
# Create Metric
metric = Metric(path, value, raw_value=raw_value, timestamp=None,
precision=precision, host=self.get_hostname(),
metric_type=metric_type)
# Publish Metric
self.publish_metric(metric)
def publish_metric(self, metric):
"""
Publish a Metric object
"""
# Process Metric
for handler in self.handlers:
handler._process(metric)
def publish_gauge(self, name, value, precision=0, instance=None):
return self.publish(name, value, precision=precision,
metric_type='GAUGE', instance=instance)
def publish_counter(self, name, value, precision=0, max_value=0,
time_delta=True, interval=None, allow_negative=False,
instance=None):
raw_value = value
value = self.derivative(name, value, max_value=max_value,
time_delta=time_delta, interval=interval,
allow_negative=allow_negative,
instance=instance)
return self.publish(name, value, raw_value=raw_value,
precision=precision, metric_type='COUNTER',
instance=instance)
def derivative(self, name, new, max_value=0,
time_delta=True, interval=None,
allow_negative=False, instance=None):
"""
Calculate the derivative of the metric.
"""
# Format Metric Path
path = self.get_metric_path(name, instance=instance)
if path in self.last_values:
old = self.last_values[path]
# Check for rollover
if new < old:
old = old - max_value
# Get Change in X (value)
derivative_x = new - old
# If we pass in an interval, use it rather than the configured one
if interval is None:
interval = int(self.config['interval'])
# Get Change in Y (time)
if time_delta:
derivative_y = interval
else:
derivative_y = 1
result = float(derivative_x) / float(derivative_y)
if result < 0 and not allow_negative:
result = 0
else:
result = 0
# Store Old Value
self.last_values[path] = new
# Return result
return result
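# Worked example (added here, not part of the original module): if the last
# published value for a path was 100 and the new value is 160 with the default
# 300-second interval, derivative() returns (160 - 100) / 300 = 0.2 per second;
# with time_delta=False it would return 60.0, the raw delta.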
def _run(self):
"""
Run the collector unless it's already running
"""
if self.collect_running:
return
# Log
self.log.debug("Collecting data from: %s" % self.__class__.__name__)
try:
try:
start_time = time.time()
self.collect_running = True
# Collect Data
self.collect()
end_time = time.time()
if 'measure_collector_time' in self.config:
if self.config['measure_collector_time']:
metric_name = 'collector_time_ms'
metric_value = int((end_time - start_time) * 1000)
self.publish(metric_name, metric_value)
except Exception:
# Log Error
self.log.error(traceback.format_exc())
finally:
self.collect_running = False
# After collector run, invoke a flush
# method on each handler.
for handler in self.handlers:
handler._flush()
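# --- Illustrative sketch (added here, not part of the original module) ---
# A minimal custom collector built on the Collector base class above. The name
# ExampleCollector and the metric names are hypothetical; the import assumes
# this module is installed as diamond.collector, as in the import at the top
# of this file.
#
# from diamond.collector import Collector
#
# class ExampleCollector(Collector):
#     def get_default_config(self):
#         config = super(ExampleCollector, self).get_default_config()
#         config.update({
#             'path': 'example',
#             'enabled': True,
#         })
#         return config
#
#     def collect(self):
#         # publish() emits a GAUGE; publish_counter() runs the raw value
#         # through derivative() before publishing it as a COUNTER.
#         self.publish('answer', 42)
#         self.publish_counter('requests', 1000)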
|
datafiniti/Diamond
|
src/diamond/collector.py
|
Python
|
mit
| 13,380
|
#!/usr/bin/env python
# Web of Science search handler through SOAP WoS API
# For more information see:
# http://ip-science.interest.thomsonreuters.com/data-integration
#
# Requires username and password to access the service
#
# Copyright 2017, Krzysztof Kutt
import zeep
from xml.etree import ElementTree
AUTH_URL = "http://search-1webofknowledge-1com-1000029ky0197.wbg2.bg.agh.edu" \
".pl/esti/wokmws/ws" \
"/WOKMWSAuthenticate?wsdl"
auth_client = None
session_id = None
def wos_login() -> None:
global auth_client, session_id
auth_client = zeep.Client(wsdl=AUTH_URL)
session_id = auth_client.service.authenticate()
print("Session ID: {}".format(session_id))
pass
def wos_logout() -> None:
global auth_client, session_id
if auth_client is not None:
print(auth_client.service.closeSession())
pass
def perform_search(query_string) -> str:
"""
Performs a search with the given query_string; the commented-out body below
is a PubMed E-utilities reference implementation (exemplary query
string: 'asthma[mesh] AND leukotrienes[mesh] AND 2009[pdat]')
:param query_string: list of terms to search
:return: xml response with details (from summary service)
"""
# prepare the basic search query
# search_data = dict()
# search_data['db'] = DATABASE
# search_data['usehistory'] = 'y'
# search_data['term'] = query_string
#
# url_base = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
# search_service = 'esearch.fcgi'
# summary_service = 'esummary.fcgi'
#
# full_url = url_base + search_service + '?' + \
# urllib.parse.urlencode(search_data)
#
# # post the query
# with urllib.request.urlopen(full_url) as search_response:
# # get WebEnv and QueryKey
# response_xml = search_response.read().decode('utf-8')
# web_env = re.search('<WebEnv>(\S+)</WebEnv>', response_xml).group(1)
# query_key = re.search('<QueryKey>(\d+)</QueryKey>',
# response_xml).group(1)
#
# # prepare the second query to receive the details
# summary_data = dict()
# summary_data['db'] = DATABASE
# summary_data['query_key'] = query_key
# summary_data['WebEnv'] = web_env
#
# full_url = url_base + summary_service + '?' + \
# urllib.parse.urlencode(summary_data)
#
# with urllib.request.urlopen(full_url) as response:
# response_xml = response.read().decode('utf-8')
# return response_xml
pass
def handle_doc(doc) -> None:
"""
Handles the xml node representing one document in results
:param doc: xml node with document data
:return: nothing
"""
# print("Title: {}".format(doc.findall('./Item[@Name="Title"]')[0].text))
# print("Authors: ", end="")
# for author in doc.findall('./Item[@Name="AuthorList"]/'):
# print(author.text, end=", ")
# print()
# doi = doc.findall('./Item[@Name="DOI"]')
# if len(doi):
# print("DOI: {}".format(doi[0].text))
# else:
# print("DOI not available")
pass
def handle_response(response_text) -> None:
"""
Handles xml response to extract the articles data
:param response_text: string with xml response
:return: nothing
"""
# response = ElementTree.fromstring(response_text)
#
# for doc in response.findall('DocSum'):
# handle_doc(doc)
# print('---------------------------')
pass
if __name__ == '__main__':
wos_login()
# query_string = 'asthma AND leukotrienes'
# response_xml = perform_search(query_string)
# handle_response(response_xml)
# wos_logout()
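# --- Illustrative sketch (added here, not part of the original handler) ---
# One possible way to wire perform_search() to the Web of Science SOAP API with
# zeep. Everything below is an assumption rather than something confirmed by
# this file: the WokSearch WSDL location, passing the session id as an "SID"
# cookie, and the search() parameter names would all need to be checked against
# the WoS Web Services documentation.
#
# SEARCH_URL = AUTH_URL.replace("WOKMWSAuthenticate", "WokSearch")  # assumed
#
# def wos_search(query_string):
#     search_client = zeep.Client(wsdl=SEARCH_URL)
#     # zeep's default transport keeps a requests.Session; attach the session id.
#     search_client.transport.session.headers["Cookie"] = "SID=" + session_id
#     return search_client.service.search(
#         queryParameters={"databaseId": "WOS",
#                          "userQuery": query_string,
#                          "queryLanguage": "en"},
#         retrieveParameters={"firstRecord": 1, "count": 10})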
|
kkutt/references-tracking
|
handler/search/webofscience.py
|
Python
|
gpl-3.0
| 3,676
|
import logging
from django import forms
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _, ugettext
from reviewboard.hostingsvcs.errors import (AuthorizationError,
TwoFactorAuthCodeRequiredError)
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.scmtools.errors import UnverifiedCertificateError
from reviewboard.scmtools.forms import (BaseRepositoryAuthSubForm,
BaseRepositoryInfoSubForm)
class _HostingServiceSubFormMixin(object):
"""Mixin for hosting service subforms.
This is used internally by :py:class:`HostingServiceForm` and
:py:class:`HostingServiceAuthForm` to check and set initial state
relating to the hosting service.
Version Added:
3.0.16
Attributes:
hosting_service_cls (type):
The subclass of
:py:class:`~reviewboard.hostingsvcs.service.HostingService` that
owns this form.
"""
def __init__(self, *args, **kwargs):
"""Initialize the authentication form.
Args:
*args (tuple):
Additional positional arguments for the parent form.
**kwargs (dict):
Additional keyword arguments for the parent form.
Keyword Args:
hosting_service_cls (type):
The hosting service class (subclass of
:py:class:`~reviewboard.hostingsvcs.service.HostingService`)
that works with this form.
This must be provided, or a ValueError will be raised.
Raises:
ValueError:
One or more of the parameters are missing or not valid for the
provided hosting account. Details are given in the error
message.
"""
hosting_service_cls = kwargs.pop('hosting_service_cls', None)
if not hosting_service_cls:
raise ValueError('hosting_service_cls cannot be None.')
self.hosting_service_cls = hosting_service_cls
super(_HostingServiceSubFormMixin, self).__init__(*args, **kwargs)
class HostingServiceAuthForm(_HostingServiceSubFormMixin,
BaseRepositoryAuthSubForm):
"""Base form for handling authentication information for a hosting account.
This takes care of collecting additional details needed for authenticating
an account, including that information with the account credentials (if
needed by the hosting service).
By default, this will retain the existing username, password, and
two-factor auth fields. Those can be replaced, but the field names should
remain the same.
Unlike :py:class:`HostingServiceForm`, field names on this class do not
need to include a service-specific prefix, as they will not conflict with
other forms. The field names will be used for the data storage. How a
subclass chooses to name these fields is up to them.
Subclasses can define a ``Meta`` class on the form containing
``help_texts`` and ``labels`` attributes, mapping field names to custom
help text or labels. This is useful for providing more specific
instructions for setting authentication data for a given service without
having to override the built-in fields. For example:
.. code-block:: python
from django.utils.translation import ugettext_lazy as _
from reviewboard.hostingsvcs.forms import HostingServiceAuthForm
class MyAuthForm(HostingServiceAuthForm):
class Meta:
labels = {
'hosting_account_username': 'API Access ID',
'hosting_account_password': 'API Secret Key',
}
help_texts = {
'hosting_account_username': _(
'Access ID used for the API. This can be found in '
'your FooService account settings.'
),
'hosting_account_password': _(
'Secret key used for the API. This can be found in '
'your FooService account settings.'
),
}
"""
hosting_url = forms.CharField(
label=_('Service URL'),
required=True,
widget=forms.TextInput(attrs={'size': 30}))
hosting_account_username = forms.CharField(
label=_('Account username'),
required=True,
widget=forms.TextInput(attrs={
'size': 30,
'autocomplete': 'off',
}))
hosting_account_password = forms.CharField(
label=_('Account password'),
required=True,
widget=forms.PasswordInput(
attrs={
'size': 30,
'autocomplete': 'off',
},
render_value=True))
hosting_account_two_factor_auth_code = forms.CharField(
label=_('Two-factor auth code'),
required=False,
widget=forms.TextInput(attrs={
'size': 30,
'autocomplete': 'off',
'data-required-for-2fa': 'true',
}))
def __init__(self, *args, **kwargs):
"""Initialize the authentication form.
Args:
*args (tuple):
Additional positional arguments for the parent form.
**kwargs (dict):
Additional keyword arguments for the parent form.
Keyword Args:
hosting_account (reviewboard.hostingsvcs.models.
HostingServiceAccount,
optional):
The hosting service account being updated, if any. If ``None``,
a new one will be created.
Raises:
ValueError:
One or more of the parameters are missing or not valid for the
provided hosting account. Details are given in the error
message.
"""
hosting_account = kwargs.pop('hosting_account', None)
self.hosting_account = hosting_account
super(HostingServiceAuthForm, self).__init__(*args, **kwargs)
hosting_service_cls = self.hosting_service_cls
hosting_service_id = hosting_service_cls.hosting_service_id
# Make sure that the hosting account, if provided, is compatible with
# the arguments provided.
if (hosting_account and
(hosting_account.local_site != self.local_site or
hosting_account.service_name != hosting_service_id)):
raise ValueError(
ugettext('This account is not compatible with this '
'hosting service configuration.'))
# If the hosting service is not self-hosted, we don't want to include
# the hosting_url form.
if not hosting_service_cls.self_hosted:
del self.fields['hosting_url']
# If it doesn't support two-factor auth, get rid of that field.
if not hosting_service_cls.supports_two_factor_auth:
del self.fields['hosting_account_two_factor_auth_code']
def get_initial_data(self):
"""Return initial data for the form, based on the hosting account.
This will return initial data for the fields, generally pulled from
the hosting account. This will be used when relinking a hosting
account that's no longer authorized.
Generally, sensitive information, like passwords, should not be
provided.
By default, the :py:attr:`username` and :py:attr:`hosting_url` fields
will have data provided. Subclasses can override this to present more
initial data.
This is only called if the form was provided a hosting account during
construction.
Returns:
dict:
Initial data for the form.
"""
initial = {}
if self.hosting_account:
initial['username'] = self.hosting_account.username
if self.hosting_service_cls.self_hosted:
initial['hosting_url'] = self.hosting_account.hosting_url
return initial
def get_credentials(self):
"""Return credentials from the form.
This should return the data that will be stored along with the
:py:class:`~reviewboard.hostingsvcs.models.HostingServiceAccount`.
The ``username``, ``password``, and ``two_factor_auth_code`` values
are treated specially during the creation and authentication of the
account, and should be provided for most standard hosting services.
All values will be provided to
:py:attr:`HostingService.authenticate
<reviewboard.hostingsvcs.service.HostingService.authenticate>`, which
will be responsible for making use of these values and storing them
on the account.
Subclasses should call the parent method and use their results as
a base, if they reuse any of the built-in fields.
Returns:
dict:
A dictionary of credentials used to authenticate the account and
talk to the API.
"""
credentials = {
'username': self.cleaned_data['hosting_account_username'],
'password': self.cleaned_data['hosting_account_password'],
}
two_factor_auth_code = \
self.cleaned_data.get('hosting_account_two_factor_auth_code')
if two_factor_auth_code:
credentials['two_factor_auth_code'] = two_factor_auth_code
return credentials
def save(self, allow_authorize=True, force_authorize=False,
extra_authorize_kwargs=None, trust_host=False, save=True):
"""Save the hosting account and authorize against the service.
This will create or update a hosting account, based on the information
provided in the form and to this method.
:py:meth:`is_valid` must be called prior to saving.
Args:
allow_authorize (bool, optional):
If ``True`` (the default), the account will be authorized
against the hosting service. If ``False``, only the database
entry for the account will be affected.
force_authorize (bool, optional):
Force the account to be re-authorized, if already authorized.
extra_authorize_kwargs (dict, optional):
Additional keyword arguments to provide for the
:py:meth:`HostingService.authorize()
<reviewboard.hostingsvcs.models.HostingService.authorize>`
call.
trust_host (bool, optional):
Whether to trust the given host, even if the linked certificate
is invalid or self-signed.
save (bool, optional):
Whether or not the created account should be saved.
This is intended to be used by subclasses who want to add
additional data to the resulting hosting account before saving.
If this is ``False``, the caller must ensure the resulting
hosting account is saved.
Returns:
reviewboard.hostingsvcs.models.HostingServiceAccount:
The updated or created hosting service account.
Raises:
reviewboard.hostingsvcs.errors.AuthorizationError:
Information needed to authorize was missing, or authorization
failed.
reviewboard.hostingsvcs.errors.TwoFactorAuthCodeRequiredError:
A two-factor authentication code is required to authorize the
account. A code will need to be provided to the form.
"""
if extra_authorize_kwargs is None:
extra_authorize_kwargs = {}
credentials = self.get_credentials()
# Grab the username from the credentials, sanity-checking that it's
# been provided as part of the get_credentials() result.
try:
username = credentials['username']
except KeyError:
logging.exception('%s.get_credentials() must return a "username" '
'key.',
self.__class__.__name__)
raise AuthorizationError(
ugettext('Hosting service implementation error: '
'%s.get_credentials() must return a "username" key.')
% self.__class__.__name__)
hosting_account = self.hosting_account
hosting_service_id = self.hosting_service_cls.hosting_service_id
hosting_url = self.cleaned_data.get('hosting_url')
if not self.hosting_service_cls.self_hosted:
assert hosting_url is None
if hosting_account:
# Update the username and hosting URL, if they've changed.
hosting_account.username = username
hosting_account.hosting_url = hosting_url
else:
# Fetch an existing hosting account based on the credentials and
# parameters, if there is one. If not, we're going to create one,
# but we won't save it until we've authorized.
hosting_account_attrs = {
'service_name': hosting_service_id,
'username': username,
'hosting_url': hosting_url,
'local_site': self.local_site,
}
try:
hosting_account = \
HostingServiceAccount.objects.get(**hosting_account_attrs)
except HostingServiceAccount.DoesNotExist:
# Create a new one, but don't save it yet.
hosting_account = \
HostingServiceAccount(**hosting_account_attrs)
if (allow_authorize and
self.hosting_service_cls.needs_authorization and
(not hosting_account.is_authorized or force_authorize)):
# Attempt to authorize the account.
if self.local_site:
local_site_name = self.local_site.name
else:
local_site_name = None
password = credentials.get('password')
two_factor_auth_code = credentials.get('two_factor_auth_code')
authorize_kwargs = dict({
'username': username,
'password': password,
'hosting_url': hosting_url,
'two_factor_auth_code': two_factor_auth_code,
'local_site_name': local_site_name,
'credentials': credentials,
}, **extra_authorize_kwargs)
try:
self.authorize(hosting_account, hosting_service_id,
**authorize_kwargs)
except UnverifiedCertificateError as e:
if trust_host:
hosting_account.accept_certificate(e.certificate)
self.authorize(hosting_account, hosting_service_id,
**authorize_kwargs)
else:
raise
if save:
hosting_account.save()
return hosting_account
def authorize(self, hosting_account, hosting_service_id,
username=None, local_site_name=None, **kwargs):
"""Authorize the service.
Args:
hosting_account (reviewboard.hostingsvcs.models.
HostingServiceAccount):
The hosting service account.
hosting_service_id (unicode):
The ID of the hosting service.
username (unicode):
The username for the account.
local_site_name (unicode, optional):
The Local Site name, if any, that the account should be
bound to.
**kwargs (dict):
Keyword arguments to pass into the service authorize function.
"""
try:
hosting_account.service.authorize(username=username,
local_site_name=local_site_name,
**kwargs)
except TwoFactorAuthCodeRequiredError:
# Mark this as required for the next form render.
self.fields['hosting_account_two_factor_auth_code']\
.required = True
# Re-raise the error.
raise
except AuthorizationError:
logging.exception('Authorization error linking hosting '
'account ID=%r for hosting service=%r, '
'username=%r, LocalSite=%r',
hosting_account.pk, hosting_service_id,
username, local_site_name)
# Re-raise the error.
raise
except UnverifiedCertificateError:
# Re-raise the error so the user will see the "I trust this
# host" prompt.
raise
except Exception:
logging.exception('Unknown error linking hosting account '
'ID=%r for hosting service=%r, '
'username=%r, LocalSite=%r',
hosting_account.pk, hosting_service_id,
username, local_site_name)
# Re-raise the error.
raise
def clean_hosting_url(self):
"""Clean the hosting URL field.
Returns:
unicode:
A string containing the hosting URL, or ``None``.
"""
return self.cleaned_data['hosting_url'] or None
class HostingServiceForm(_HostingServiceSubFormMixin,
BaseRepositoryInfoSubForm):
"""Base form for collecting information for a hosting service.
This is responsible for providing fields used to communicate with a
particular hosting service, such as a registered organization name or ID
on the service. There may be one global form (set in
:py:attr:`HostingService.form
<reviewboard.hostingsvcs.service.HostingService.form>`) or one per plan.
Each field will be stored directly in :py:attr:`Repository.extra_data
<reviewboard.scmtools.models.Repository.extra_data>`, using the field's
name as the key.
Subclasses are expected to prefix every field with the ID of the hosting
service, to avoid conflicts.
Subclasses may also define a ``Meta`` class on the form containing
``help_texts`` and ``labels`` attributes, mapping field names to custom
help text or labels. This is useful if a hosting service has a base form
for collecting details for each plan, and wants to customize the labels
and help text for each subclass. For example:
.. code-block:: python
from django import forms
from django.utils.translation import ugettext_lazy as _
from reviewboard.hostingsvcs.forms import HostingServiceForm
class MyServiceBaseForm(HostingServiceForm):
myservice_owner = forms.CharField(max_length=64)
class MyServiceOrgPlanForm(MyServiceBaseForm):
class Meta:
labels = {
'myservice_owner': _('User'),
}
help_texts = {
'myservice_owner': _(
'The username of the user owning the repository.'
),
}
class MyServicePersonalPlanForm(MyServiceBaseForm):
class Meta:
labels = {
'myservice_owner': _('Organization'),
}
help_texts = {
'myservice_owner': _(
'The ID of the organization owning the repository.'
),
}
"""
def get_initial_data(self):
"""Return initial data for the form.
This will load information from the repository's
:py:attr:`~reviewboard.scmtools.models.Repository.extra_data` into the
form's fields.
Returns:
dict:
Initial data for the form.
"""
return self.get_field_data_from(self.repository)
def load(self, repository=None, **kwargs):
"""Load information for the form.
By default, this will populate initial values returned in
:py:meth:`get_initial_data`. Subclasses can override this to set
other fields or state as needed.
Args:
repository (reviewboard.scmtools.models.Repository, optional):
The repository being loaded. This is scheduled to be
deprecated. Subclasses should use the :py:attr:`repository`
attribute instead.
"""
super(HostingServiceForm, self).load()
def save(self, repository=None, **kwargs):
"""Save information from the form back to the repository.
This will set each field in the repository's
:py:attr:`~reviewboard.scmtools.models.Repository.extra_data`.
Args:
repository (reviewboard.scmtools.models.Repository, optional):
The repository being loaded. This is scheduled to be
deprecated. Subclasses should use the :py:attr:`repository`
attribute instead.
"""
if repository is None:
repository = self.repository
for key, value in self.cleaned_data.items():
key = self.add_prefix(force_text(key))
repository.extra_data[key] = value
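# --- Illustrative sketch (added here, not part of the original module) ---
# A rough example of driving the authentication form defined above. The names
# MyService and form_data are hypothetical, and the exact keyword arguments
# accepted by the BaseRepositoryAuthSubForm parent may differ; this only shows
# the hosting_service_cls requirement and the save() entry point documented
# above.
#
# form = HostingServiceAuthForm(data=form_data,
#                               hosting_service_cls=MyService)
# if form.is_valid():
#     # Authorizes against the service (unless allow_authorize=False) and
#     # creates or updates the HostingServiceAccount row.
#     account = form.save(trust_host=True)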
|
reviewboard/reviewboard
|
reviewboard/hostingsvcs/forms.py
|
Python
|
mit
| 21,600
|
""" GTK GUI module for pypass """
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Pierre-Yves Chibon <pingou AT pingoured DOT fr>
# Copyright (c) 2011 Johan Cwiklinski <johan AT x-tnd DOT be>
#
# This file is part of pypass.
#
# pypass is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pypass is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pypass. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import logging
import pypass.pyp as pyp
from pypass import __version__, __author__, __copyright__
from pypass import __license_text__, __application__, __locale_dir__
from pypass import __url__, __credits__
from pypass.pypobj import PypFolder, PypAccount
LOG = logging.getLogger(__name__)
if not LOG.handlers:
try:
LOG.addHandler(logging.NullHandler())
except AttributeError:
LOG.addHandler(pyp.PypNullHandler())
try:
import pygtk
pygtk.require("2.0")
except ImportError:
pass
try:
import gtk
except ImportError:
LOG.error(_("GTK is not available."))
sys.exit(1)
import gtk.glade
import gettext
#that way, GUI is not translated (but should be)
#gettext.install(__application__, __locale_dir__)
#solution found there:
#http://www.daa.com.au/pipermail/pygtk/2007-March/013586.html
#locale.setlocale(locale.LC_ALL, '')
# see http://bugzilla.gnome.org/show_bug.cgi?id=344926 for why the
# next two commands look repeated.
gtk.glade.bindtextdomain(__application__, __locale_dir__)
gtk.glade.textdomain(__application__)
gettext.bindtextdomain(__application__, __locale_dir__)
gettext.textdomain(__application__)
def file_browse(dialog_action, title, pathname, file_name="",
types=None):
"""This function is used to browse for a pyWine file.
It can be either a save or open dialog depending on
what dialog_action is.
The path to the file will be returned if the user
selects one, however a blank string will be returned
if they cancel or do not select one.
dialog_action - The open or save mode for the dialog either
gtk.FILE_CHOOSER_ACTION_OPEN, gtk.FILE_CHOOSER_ACTION_SAVE
file_name - Default name when doing a save
source:
http://www.pygtk.org/articles/extending-our-pygtk-application/extending-our-pygtk-application.htm
"""
if (dialog_action == gtk.FILE_CHOOSER_ACTION_OPEN):
dialog_buttons = (gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN,
gtk.RESPONSE_OK)
else:
dialog_buttons = (gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE,
gtk.RESPONSE_OK)
file_dialog = gtk.FileChooserDialog(title=title,
action=dialog_action,
buttons=dialog_buttons)
#set the filename if we are saving
if (dialog_action == gtk.FILE_CHOOSER_ACTION_SAVE):
file_dialog.set_current_name(file_name)
file_dialog.set_current_folder(pathname)
file_dialog.set_default_response(gtk.RESPONSE_OK)
if types is not None and isinstance(types, dict):
for typek in types.keys():
filefilter = gtk.FileFilter()
filefilter.set_name(typek)
for element in types[typek]:
filefilter.add_mime_type(element)
file_dialog.add_filter(filefilter)
#Create and add the 'all files' filter
filefilter = gtk.FileFilter()
filefilter.set_name("All files")
filefilter.add_pattern("*")
file_dialog.add_filter(filefilter)
#Init the return value
result = None
if file_dialog.run() == gtk.RESPONSE_OK:
result = file_dialog.get_filename()
file_dialog.destroy()
return result
def _dialog(dialog):
""" Display a dialog window """
dialog.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
dialog.set_modal(True)
dialog.set_keep_above(True)
dialog.show_all()
result = dialog.run()
dialog.hide()
return result
def dialog_window(message, error=None, action=gtk.MESSAGE_ERROR):
"""
Display a dialog window with the given message.
The action argument specifies the type of dialog to display; the return
signal is handled accordingly.
message -- a string of text which is displayed in the dialog
error -- a string with another part of the message displayed on
a second line
action -- a GTK message type constant giving the type of window
displayed _link: http://www.pygtk.org/docs/pygtk/gtk-constants.html#gtk-message-type-constants
"""
dialog = gtk.MessageDialog(None, 0, action)
dialog.set_markup("<b>" + "Error" + "</b>")
if error is not None:
message = message + "\n %s" % error
dialog.format_secondary_markup(message)
if action == gtk.MESSAGE_ERROR:
dialog.add_buttons(gtk.STOCK_OK, gtk.RESPONSE_YES)
elif action == gtk.MESSAGE_WARNING:
dialog.add_buttons(gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL,
gtk.STOCK_OK,
gtk.RESPONSE_YES)
elif action == gtk.MESSAGE_QUESTION:
dialog.add_buttons(gtk.STOCK_NO,
gtk.RESPONSE_NO,
gtk.STOCK_OK,
gtk.RESPONSE_YES)
return _dialog(dialog)
class PyPassGui(object):
""" Class handling the gtk gui for pypass """
def __init__(self, pypass, options):
""" Instantiate the window and set basic elements """
self.pypass = pypass
self.builder = gtk.Builder()
self.builder.add_from_file(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "ui", "pypass.glade"))
self.builder.set_translation_domain(__application__)
self.mainwindow = self.builder.get_object('mainwindow')
self.mainwindow.set_title("PyPass")
self.set_button_toolbar()
## source to put the icons in the TreeView:
## http://www.eurion.net/python-snippets/snippet/Tree%20View%20Column.html
## Handles the tree view in the main window
# retrieve the TreeView
treeview = self.builder.get_object("treefolderview")
# create the TreeViewColumns to display the data
col0 = gtk.TreeViewColumn("")
treeview.append_column(col0)
# create a CellRenderers to render the data
cellpb = gtk.CellRendererPixbuf()
cell = gtk.CellRendererText()
# add the cells to the columns - 2
col0.pack_start(cellpb, False)
col0.pack_start(cell, True)
# set the cell attributes to the appropriate liststore column
col0.set_attributes(cellpb, stock_id=1)
col0.set_attributes(cell, text=0)
# filename contains the name of the file which has been opened
# This way if you open a file and press ctrl+s you save in the file
# you opened not in the default one.
self.filename = None
# key is the key you select, if it is None it will use the default
# if set in the configuration file.
self.key = None
# created reflects whether the database was new/manually created
self.created = False
self.data = PypFolder()
if options.filename is not None:
self.filename = options.filename
self.pypass.load_data(filename=self.filename)
if self.pypass.data is not None and self.pypass.data != "":
self.load_password_tree(self.pypass.json_to_tree())
# Add the images on the button :-)
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.get_settings().set_long_property('gtk-button-images',
True, '')
dic = {
"on_buttonQuit_clicked": self.quit,
"on_windowMain_destroy": self.quit,
"gtk_main_quit": self.quit,
"show_about": self.show_about,
"cursor_changed": self.on_account_selected,
"add_entry": self.add_entry,
"add_folder": self.add_folder,
"edit_entry": self.edit_entry,
"remove_entry": self.remove_entry,
"generate_password": self.generate_password,
"save_database": self.save_database,
"save_as_database": self.save_as_database,
"open_database": self.open_database,
"set_key": self.set_key,
"menu_new_db": self.new_database,
}
self.builder.connect_signals(dic)
self.update_status_bar(_("Welcome to pypass"))
self.modified_db = False
self.mainwindow.show()
gtk.main()
def set_button_toolbar(self):
""" Set the button toolbar with their logo """
butons = {
"b_open": gtk.STOCK_OPEN,
"b_save": gtk.STOCK_SAVE,
"b_folder": gtk.STOCK_DIRECTORY,
"b_add": gtk.STOCK_ADD,
"b_edit": gtk.STOCK_EDIT,
"b_del": gtk.STOCK_REMOVE,
"b_quit": gtk.STOCK_QUIT,
"b_about": gtk.STOCK_ABOUT,
}
self.set_button_img(butons)
def set_button_img(self, butons):
""" For a given hash of button, set the image """
for buton in butons.keys():
butonopen = self.builder.get_object(buton)
img = gtk.image_new_from_stock(butons[buton],
gtk.ICON_SIZE_LARGE_TOOLBAR)
butonopen.set_image(img)
def load_password_tree(self, obj, parent=None):
"""
Load a given tree into the treefolderview using the
load_pypfolder function
"""
self.data = obj
treeview = self.builder.get_object("treefolderview")
treestore = gtk.TreeStore(str, str, str)
self.load_pypfolder(treestore, obj, parent)
treeview.set_model(treestore)
def load_pypfolder(self, treestore, obj, parent=None):
"""
Loads the given PypFolder into the given treestore with the
given parent
This function is recursive to load the whole tree in memory
including children folders and accounts
"""
for folder in obj.folders:
icon = gtk.STOCK_DIRECTORY
parent2 = treestore.append(parent, [folder.name, icon, "folder"])
self.load_pypfolder(treestore, folder, parent2)
for account in obj.accounts:
icon = gtk.STOCK_DIALOG_AUTHENTICATION
treestore.append(parent, [account.name, icon, "account"])
def show_about(self, widget):
""" Show the about diaglog """
about = self.builder.get_object("aboutdialog")
about.set_name("PyPass")
about.set_version(__version__)
about.set_copyright(__copyright__)
about.set_authors(__credits__)
about.set_comments('\n'.join(__author__))
about.set_license(__license_text__)
about.set_website(__url__)
_logo_path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "data", "PyPass.png")
about.set_logo(gtk.gdk.pixbuf_new_from_file(_logo_path))
_dialog(about)
def new_database(self, widget):
""" Set a new database """
if self.modified_db:
result = dialog_window(_("This database has been modified"),
_("Do you want to save it before continuing?"),
action=gtk.MESSAGE_QUESTION)
if result == gtk.RESPONSE_YES:
self.save_database()
self.data = PypFolder()
self.filename = None
self.created = True
self.load_password_tree(self.data)
def quit(self, widget):
""" Quit the application """
LOG.info(_("Exiting..."))
if self.modified_db:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING)
dialog.set_markup("<b>" + _("Error") + "</b>")
dialog.format_secondary_markup(
_("Do you want to save file before quit?"))
dialog.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_NO, gtk.RESPONSE_NO,
gtk.STOCK_YES, gtk.RESPONSE_YES,)
result = _dialog(dialog)
if result == gtk.RESPONSE_YES:
outcome = self.save_database()
if outcome is not True:
return
elif result == gtk.RESPONSE_CANCEL:
self.mainwindow.show()
return
sys.exit(0)
def set_combox_type(self):
""" Set the different options in the combobox of the new entry
dialog """
options = {
"website": "",
"server": gtk.STOCK_NETWORK,
"ftp": "",
"Email": "",
}
combo = self.builder.get_object("combo_type")
store = gtk.ListStore(str, str)
opts = options.keys()
opts.sort()
for opt in opts:
store.append([opt, options[opt]])
combo.set_model(store)
cell = gtk.CellRendererText()
combo.pack_start(cell, True)
combo.add_attribute(cell, 'text', 0)
combo.set_active(0)
def set_keys_list(self, hide=True):
""" Set all the keys retrieved from pypass into the list """
keys = self.pypass.list_recipients()
treeview = self.builder.get_object("treeviewkey")
column_str = gtk.TreeViewColumn(_('Key ID'))
treeview.append_column(column_str)
cell = gtk.CellRendererText()
column_str.pack_start(cell, True)
column_str.add_attribute(cell, "text", 0)
column_str = gtk.TreeViewColumn('')
treeview.append_column(column_str)
cell1 = gtk.CellRendererText()
column_str.pack_start(cell1, True)
column_str.add_attribute(cell1, "text", 1)
store = gtk.ListStore(str, str)
treeview.set_model(store)
for key in keys:
store.append([key['keyid'], " ".join(key['uids'])])
def set_path(self, model, itera):
""" Set the selection focus of the tree on the element selected
and expand the selected element.
This prevents the tree from collapsing when adding a new folder.
"""
tree = self.builder.get_object("treefolderview")
if itera:
path = model.get_path(itera)
else:
path = (0,)
tree.expand_row((path[0], ), True)
tree.set_cursor(path)
def set_folder_dialog(self):
"""
Reads the dialogaddentry ui file and adjusts the dialog to make it
the folder dialog (removes some items and resizes it).
"""
self.builder.add_from_file(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "ui", "dialogaddentry.glade"))
butons = {
"b_add_entry": gtk.STOCK_OK,
"b_cancel_entry": gtk.STOCK_CANCEL,
}
self.set_button_img(butons)
self.set_combox_type()
for objname in ("label7", "label8", "label3", "label5", "label4",
"entry_user", "entry_url", "entry_password",
"b_generate_password", "combo_type"):
obj = self.builder.get_object(objname)
obj.destroy()
add = self.builder.get_object("dialogaddentry")
add.set_title(_("Add a folder"))
add.set_size_request(400, 150)
def set_entry_dialog(self):
""" Add the entry dialog to the current window """
self.builder.add_from_file(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "ui", "dialogaddentry.glade"))
butons = {
"b_add_entry": gtk.STOCK_OK,
"b_cancel_entry": gtk.STOCK_CANCEL,
}
self.set_button_img(butons)
self.set_combox_type()
dic = {"generate_password": self.generate_password}
self.builder.connect_signals(dic)
def open_database(self, widget=None):
""" Open a selected database """
# get database file
filename = file_browse(gtk.FILE_CHOOSER_ACTION_OPEN,
_("Open a database"),
os.path.expanduser('~'))
if filename is not None:
self.pypass.load_data(filename=filename)
self.filename = filename
self.created = False
if self.pypass.data is not None and self.pypass.data != "":
self.load_password_tree(self.pypass.json_to_tree())
return
def save_database(self, widget=None):
""" Save the current database """
if not self.pypass.is_default_in_keyring():
result = dialog_window(_("The key set as to encrypt this file is not" \
" installed in this machine, do you want to continue ?"),
action=gtk.MESSAGE_QUESTION)
if result == gtk.RESPONSE_NO:
return
if self.created:
self.save_as_database()
if self.filename is None:
result = dialog_window(_("No database file specified!"),
action=gtk.MESSAGE_ERROR)
return
self.pypass.data_from_json(self.data)
outcome = self.pypass.crypt(force=True, recipients=self.key,
filename=self.filename)
if outcome == "key_not_found":
result = dialog_window(_("The database could not be saved!"),
_("The key could not be found"),
action=gtk.MESSAGE_ERROR)
return
if outcome is not True:
result = dialog_window(_("The database could not be saved!"),
action=gtk.MESSAGE_ERROR)
return
self.update_status_bar(_("Database saved in %s" % self.filename))
self.modified_db = False
return True
def save_as_database(self, widget=None):
""" Save the current database in a selected file """
filename = file_browse(gtk.FILE_CHOOSER_ACTION_SAVE,
_("Save a database"),
os.path.expanduser('~'))
if filename is None:
return
self.pypass.data_from_json(self.data)
outcome = self.pypass.crypt(recipients=self.key,
filename=filename)
if outcome == "file_exists":
result = dialog_window(_("This database already exists"),
_("Do you want to overrite it ?"),
action=gtk.MESSAGE_QUESTION)
if result != gtk.RESPONSE_YES:
return
else:
outcome = self.pypass.crypt(force=True,
recipients=self.key,
filename=filename)
self.update_status_bar(_("Database saved"))
self.modified_db = False
def on_account_selected(self, widget=None):
""" Display the account in the window when selected on the tree """
selection = self.builder.get_object("treefolderview").get_selection()
(model, itera) = selection.get_selected()
folderspath = pyp.get_folder_path(model, itera, [])
txtpass = self.builder.get_object("labelpass")
if itera is None:
txtpass.set_text(" ")
return
item = self.pypass.get_item(self.data, folderspath,
model[itera][2], model[itera][0])
if item is None:
txtpass.set_text(" ")
return
elif isinstance(item, PypFolder):
content = ""
content += "<b>Name:</b> %s \n" % item.name
content += "<b>Description:</b> %s \n" % \
item.description
txtpass.set_text(content)
txtpass.set_use_markup(True)
return
elif isinstance(item, PypAccount):
content = ""
content += "<b>Name:</b> %s \n" % item.name
content += "<b>Password:</b> %s \n" % item.password
keys = item.extras.keys()
keys.sort()
for key in keys:
if key not in ('name', 'password'):
if key.lower() == 'url':
content += "<b>%s:</b> <a href='%s'>" \
"%s</a> \n" % (key, item.extras[key],
item.extras[key])
else:
content += "<b>%s:</b> %s \n" % (
key, item.extras[key])
txtpass.set_text(content)
txtpass.set_use_markup(True)
return
else:
txtpass.set_text(" ")
def set_key(self, widget):
"""
Display the window in which the user can choose one of the keys
installed on the machine, and set this key in the config file
"""
self.builder.add_from_file(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "ui", "dialogkeychooser.glade"))
butons = {
"b_ok_key": gtk.STOCK_OK,
"b_cancel_key": gtk.STOCK_CANCEL,
}
self.set_button_img(butons)
self.set_keys_list()
self.builder.get_object("label_password").set_text(_("Other key:"))
self.builder.get_object("entry_key_password").set_visibility(True)
add = self.builder.get_object("dialogkeychooser")
if _dialog(add) == 1:
selection = self.builder.get_object("treeviewkey").get_selection()
(model, itera) = selection.get_selected()
otherk = self.builder.get_object("entry_key_password").get_text()
if itera is None and (otherk is None or otherk == ""):
LOG.warning("No key selected")
return
key = None
if itera is not None:
key = model[itera][0]
if otherk is not None and otherk != "":
key = otherk
if key is None:
LOG.error("bug")
LOG.debug(key)
self.key = key
self.pypass.set_recipient(key[:8])
def edit_entry(self, widget):
""" Edit an entry from the tree """
(model, itera) = self.get_path()
if itera is None:
return
folderspath = pyp.get_folder_path(model, itera, [])
item = self.pypass.get_item(self.data, folderspath,
model[itera][2], model[itera][0])
if item is None:
return
if isinstance(item, PypAccount):
self.set_entry_dialog()
# Name
entry_name = self.builder.get_object("entry_name")
entry_name.set_text(item.name)
entry_name.set_editable(False) # Name cannot be changed
# Password
entry_password = self.builder.get_object("entry_password")
entry_password.set_text(item.password)
# Extras info
if "user" in item.extras.keys():
self.builder.get_object("entry_user").set_text(
item.extras["user"])
if "url" in item.extras.keys():
self.builder.get_object("entry_url").set_text(
item.extras["url"])
if "description" in item.extras.keys():
self.builder.get_object("entry_description").set_text(
item.extras["description"])
item = self.get_password_from_dialog()
else:
self.set_folder_dialog()
# Name
entry_name = self.builder.get_object("entry_name")
entry_name.set_text(item.name)
entry_name.set_editable(False) # Name cannot be changed
# Password
if item.description is not None:
entry_description = self.builder.get_object("entry_description")
entry_description.set_text(item.description)
item = self.get_folder_from_dialog()
if item is None:
return
self.data = self.pypass.replace_item(self.data, model,
itera, item)
self.load_password_tree(self.data)
self.on_account_selected()
self.update_status_bar(_("Account updated"))
self.modified_db = True
def remove_entry(self, widget):
""" Remove an entry from the tree """
(model, itera) = self.get_path()
folderspath = pyp.get_folder_path(model, itera, [])
item = self.pypass.get_item(self.data, folderspath,
model[itera][2], model[itera][0])
result = dialog_window(_("You are going to remove %s.") % item.name,
_("Do you want to continue ?"),
action=gtk.MESSAGE_QUESTION)
if result == gtk.RESPONSE_NO:
return
elif result == gtk.RESPONSE_YES:
self.data = self.pypass.remove_item(self.data, model, itera,
item)
self.load_password_tree(self.data)
self.on_account_selected()
self.update_status_bar(_("Item removed"))
self.modified_db = True
def get_folder_from_dialog(self):
"""
Display the addentry dialog and returns the PypFolder object
"""
add = self.builder.get_object("dialogaddentry")
if _dialog(add) == 1:
name = self.builder.get_object("entry_name").get_text()
description = \
self.builder.get_object("entry_description").get_text()
if name is None or name == "":
dialog_window(_("Could not create the folder."),
_("Name was missing."),
gtk.MESSAGE_ERROR)
return
else:
if description is None or description == "":
folder = PypFolder(name)
else:
folder = PypFolder(name, description)
add.destroy()
return folder
def get_password_from_dialog(self):
"""
Display the addentry dialog and returns the PypAccount object
"""
add = self.builder.get_object("dialogaddentry")
if _dialog(add) == 1:
name = self.builder.get_object("entry_name").get_text()
password = self.builder.get_object("entry_password").get_text()
user = self.builder.get_object("entry_user").get_text()
url = self.builder.get_object("entry_url").get_text()
description = \
self.builder.get_object("entry_description").get_text()
passtype = self.builder.get_object("combo_type").get_active()
if "" in (name, password):
dialog_window(_("Could not add the account"),
_("Name and password are both required."),
gtk.MESSAGE_ERROR)
return
else:
account = PypAccount(name, password)
if url != "":
account.extras['url'] = url
if user != "":
account.extras['user'] = user
if description != "":
account.extras['description'] = description
add.destroy()
return account
def add_entry(self, widget):
""" Display the dialog to add an entry to the database """
self.set_entry_dialog()
account = self.get_password_from_dialog()
if account is None:
return
(model, itera) = self.get_path()
data = self.pypass.add_account(
self.data, model, itera, account)
if data == "duplicate_entry":
dialog_window(_("Could not add the account"),
_("There is already an account with this name."),
gtk.MESSAGE_ERROR)
return
self.load_password_tree(data)
self.update_status_bar(_("Account added"))
self.modified_db = True
def add_folder(self, widget):
""" Display the dialog to add a folder to the database """
self.set_folder_dialog()
folder = self.get_folder_from_dialog()
if folder is None:
return
(model, itera) = self.get_path()
data = self.pypass.add_folder(self.data, model, itera, folder)
if data == "duplicate_entry":
dialog_window(_("Could not add the folder"),
_("There is already a folder with this name."),
gtk.MESSAGE_ERROR)
return
self.load_password_tree(data)
self.update_status_bar(_("Folder added"))
self.modified_db = True
self.set_path(model, itera)
def update_status_bar(self, entry):
""" Update the status bar with the given text """
stbar = self.builder.get_object('statusbar')
stbar.push(1, entry)
def get_path(self):
""" Retrieve the path selected """
selection = self.builder.get_object("treefolderview").get_selection()
return selection.get_selected()
def generate_password(self, widget):
""" Generate a random password """
password = self.pypass.generate_password()
entry = self.builder.get_object("entry_password")
entry.set_text(password)
return
|
pypingou/pypass-gnome
|
src/gui.py
|
Python
|
gpl-3.0
| 29,863
|
from django.conf.urls import patterns, url
from core.views import UserList, UserDetail, MeetingsList, MeetingDetail, UserCreate, AuthView
urlpatterns = [
url(r'^api-token-auth/', AuthView.as_view(), name='auth'),
url(r'^user-create/$', UserCreate.as_view(), name='user-create'),
url(r'^users-list/$', UserList.as_view(), name='users-list'),
url(r'^user-detail$', UserDetail.as_view(), name='user-detail'),
url(r'^user-detail/(?P<pk>\d+)$', UserDetail.as_view(), name='user-detail'),
url(r'^meetings-list/$', MeetingsList.as_view(), name='meetings-list'),
url(r'^meeting-detail$', MeetingDetail.as_view(), name='meeting-detail'),
url(r'^meeting-detail/(?P<pk>\d+)/$', MeetingDetail.as_view(), name='meeting-detail'),
]
|
KraftSoft/together
|
core/urls.py
|
Python
|
bsd-3-clause
| 756
|
#encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8') # set the default encoding to utf-8
import os
from functools import wraps
from flask import Flask # import the Flask class from the flask package
from flask import render_template # import the render_template function from the flask package
from flask import request # import the request object from the flask package
from flask import redirect # import the redirect function from the flask package
from flask import url_for
from flask import session
from flask import flash
import userdb as user
#import user # import the user module
#import loganalysis
import loganalysisdb as loganalysis
app = Flask(__name__)
app.secret_key = 'oF\xd3I\x98\xe5\xb4\x1a\xfb\xc77\xe3\xcc,\xc2\xd2\x05\x8b\xa9\x9b\x01\xa0t\x0f\x04\x11\x19\xcd4\x96\x8d\x14'
def login_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if session.get('user') is None:
return redirect('/')
rt = func(*args, **kwargs)
return rt
return wrapper
'''Open the user login page
'''
@app.route('/') # route requests for url path=/ to the index function
def index():
return render_template('login.html') # load the login.html template and return the page content
'''Check the submitted login information
'''
@app.route('/login/', methods=["POST"]) # route POST requests for url path=/login/ to the login function
def login():
username = request.form.get('username', '') # read the data submitted by the user
password = request.form.get('password', '')
# verify that the username and password are correct
print username, password
if user.validate_login(username, password):
session['user'] = {'username' : username}
return redirect('/users/') #跳转到url /users/
else:
# login failed
return render_template('login.html', username=username, error='用户名或密码错误')
'''Display the list of users
'''
@app.route('/users/') # route GET requests for url path=/users/ to the users function
def users():
# fetch information for all users
print session
if session.get('user') is None:
return redirect('/')
_users = user.get_users()
return render_template('users.html', users=_users, username=session.get('user').get('username'), msg=request.args.get('msg', '')) # render the users.html template
'''Open the page for entering a new user's information
'''
@app.route('/user/create/') # route GET requests for url path=/user/create/ to create_user
@login_required
def create_user():
return render_template('user_create.html') # render user_create.html
'''Store the newly created user's information
'''
@app.route('/user/add/', methods=['POST']) # route POST requests for url path=/user/add/ to add_user
@login_required
def add_user():
username = request.form.get('username', '')
password = request.form.get('password', '')
age = request.form.get('age', '')
gender = request.form.get('gender', '1')
hobby = request.form.getlist('hobby')
img = request.files.get('img')
if img:
print img.filename
img.save('/tmp/kk.txt')
print request.form
print gender
print hobby
# validate the user information
_is_ok, _error = user.validate_add_user(username, password, age)
if _is_ok:
user.add_user(username, password, age) # validation passed, add the user info
return redirect(url_for('users', msg='新建成功')) # redirect to the user list via url_for
else:
# go back to the user creation page, echoing the error message and user info
return render_template('user_create.html', \
error=_error, username=username, \
password=password, age=age)
'''Open the user information modification page
'''
@app.route('/user/modify/') # route GET requests for url path=/user/modify/ to the modify_user function
@login_required
def modify_user():
uid = request.args.get('id', '')
_user = user.get_user(uid)
_error = ''
_uid = ''
_username = ''
_password = ''
_age = ''
if _user is None:
_error = '用户信息不存在'
else:
_uid = _user.get('id')
_username = _user.get('username')
_password = _user.get('password')
_age = _user.get('age')
return render_template('user_modify.html', error=_error, password=_password, age=_age, username=_username, uid=_uid)
'''Save the modified user data
'''
@app.route('/user/update/', methods=['POST']) # route POST requests for url path=/user/update/ to the update_user function
@login_required
def update_user():
uid = request.form.get('id', '')
username = request.form.get('username', '')
password = request.form.get('password', '')
age = request.form.get('age', '')
# validate the user information
_is_ok, _error = user.validate_update_user(uid, username, password, age)
if _is_ok:
user.update_user(uid, username, password, age)
flash('修改用户信息成功')
return redirect('/users/')
else:
return render_template('user_modify.html', error=_error, username=username, password=password, age=age, uid=uid)
@app.route('/user/delete/')
@login_required
def delete_user():
uid = request.args.get('id', '')
user.delete_user(uid)
flash('删除用户信息成功')
return redirect('/users/')
@app.route('/logs/')
@login_required
def logs():
topn = request.args.get('topn', 10)
topn = int(topn) if str(topn).isdigit() else 10
rt_list = loganalysis.get_topn(topn=topn)
return render_template('logs.html', rt_list=rt_list, title='topn log')
@app.route('/logout/')
@login_required
def logout():
session.clear()
print session
return redirect('/')
@app.route('/test/', methods=['POST', 'GET'])
def test():
print request.args
print request.form
print request.files
print request.headers
return render_template('test.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9001, debug=True)
|
51reboot/actual_09_homework
|
07/zhaoyuanhai/cmdb/app.py
|
Python
|
mit
| 6,366
|
1000L
u"unicode string"
U"unicode string"
ur"raw unicode"
UR"raw unicode"
Ur"raw unicode"
uR"raw unicode"
u"""unicode string"""
U"""unicode string"""
ur"""raw unicode"""
UR"""raw unicode"""
Ur"""raw unicode"""
uR"""raw unicode"""
u'unicode string'
U'unicode string'
ur'raw unicode'
UR'raw unicode'
Ur'raw unicode'
uR'raw unicode'
u'''unicode string'''
U'''unicode string'''
ur'''raw unicode'''
UR'''raw unicode'''
Ur'''raw unicode'''
uR'''raw unicode'''
u"\
\\\'\"\a\b\f\n\r\t\u2026\v\052\x2A"
u'\N{COLON}'
-2147483648l
-2147483648L
0720
-100L
|
DinoV/PTVS
|
Python/Tests/TestData/Grammar/LiteralsV2.py
|
Python
|
apache-2.0
| 543
|
import macpath
from test import test_genericpath
import unittest
class MacPathTestCase(unittest.TestCase):
def test_abspath(self):
self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")
def test_isabs(self):
isabs = macpath.isabs
self.assertTrue(isabs("xx:yy"))
self.assertTrue(isabs("xx:yy:"))
self.assertTrue(isabs("xx:"))
self.assertFalse(isabs("foo"))
self.assertFalse(isabs(":foo"))
self.assertFalse(isabs(":foo:bar"))
self.assertFalse(isabs(":foo:bar:"))
self.assertTrue(isabs(b"xx:yy"))
self.assertTrue(isabs(b"xx:yy:"))
self.assertTrue(isabs(b"xx:"))
self.assertFalse(isabs(b"foo"))
self.assertFalse(isabs(b":foo"))
self.assertFalse(isabs(b":foo:bar"))
self.assertFalse(isabs(b":foo:bar:"))
def test_split(self):
split = macpath.split
self.assertEqual(split("foo:bar"),
('foo:', 'bar'))
self.assertEqual(split("conky:mountpoint:foo:bar"),
('conky:mountpoint:foo', 'bar'))
self.assertEqual(split(":"), ('', ''))
self.assertEqual(split(":conky:mountpoint:"),
(':conky:mountpoint', ''))
self.assertEqual(split(b"foo:bar"),
(b'foo:', b'bar'))
self.assertEqual(split(b"conky:mountpoint:foo:bar"),
(b'conky:mountpoint:foo', b'bar'))
self.assertEqual(split(b":"), (b'', b''))
self.assertEqual(split(b":conky:mountpoint:"),
(b':conky:mountpoint', b''))
def test_join(self):
join = macpath.join
self.assertEqual(join('a', 'b'), ':a:b')
self.assertEqual(join(':a', 'b'), ':a:b')
self.assertEqual(join(':a:', 'b'), ':a:b')
self.assertEqual(join(':a::', 'b'), ':a::b')
self.assertEqual(join(':a', '::b'), ':a::b')
self.assertEqual(join('a', ':'), ':a:')
self.assertEqual(join('a:', ':'), 'a:')
self.assertEqual(join('a', ''), ':a:')
self.assertEqual(join('a:', ''), 'a:')
self.assertEqual(join('', ''), '')
self.assertEqual(join('', 'a:b'), 'a:b')
self.assertEqual(join('', 'a', 'b'), ':a:b')
self.assertEqual(join('a:b', 'c'), 'a:b:c')
self.assertEqual(join('a:b', ':c'), 'a:b:c')
self.assertEqual(join('a', ':b', ':c'), ':a:b:c')
self.assertEqual(join('a', 'b:'), 'b:')
self.assertEqual(join('a:', 'b:'), 'b:')
self.assertEqual(join(b'a', b'b'), b':a:b')
self.assertEqual(join(b':a', b'b'), b':a:b')
self.assertEqual(join(b':a:', b'b'), b':a:b')
self.assertEqual(join(b':a::', b'b'), b':a::b')
self.assertEqual(join(b':a', b'::b'), b':a::b')
self.assertEqual(join(b'a', b':'), b':a:')
self.assertEqual(join(b'a:', b':'), b'a:')
self.assertEqual(join(b'a', b''), b':a:')
self.assertEqual(join(b'a:', b''), b'a:')
self.assertEqual(join(b'', b''), b'')
self.assertEqual(join(b'', b'a:b'), b'a:b')
self.assertEqual(join(b'', b'a', b'b'), b':a:b')
self.assertEqual(join(b'a:b', b'c'), b'a:b:c')
self.assertEqual(join(b'a:b', b':c'), b'a:b:c')
self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c')
self.assertEqual(join(b'a', b'b:'), b'b:')
self.assertEqual(join(b'a:', b'b:'), b'b:')
def test_splitext(self):
splitext = macpath.splitext
self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
self.assertEqual(splitext(".ext"), ('.ext', ''))
self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
self.assertEqual(splitext(""), ('', ''))
self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
self.assertEqual(splitext(b":foo.ext"), (b':foo', b'.ext'))
self.assertEqual(splitext(b"foo:foo.ext"), (b'foo:foo', b'.ext'))
self.assertEqual(splitext(b".ext"), (b'.ext', b''))
self.assertEqual(splitext(b"foo.ext:foo"), (b'foo.ext:foo', b''))
self.assertEqual(splitext(b":foo.ext:"), (b':foo.ext:', b''))
self.assertEqual(splitext(b""), (b'', b''))
self.assertEqual(splitext(b"foo.bar.ext"), (b'foo.bar', b'.ext'))
def test_ismount(self):
ismount = macpath.ismount
self.assertEqual(ismount("a:"), True)
self.assertEqual(ismount("a:b"), False)
self.assertEqual(ismount("a:b:"), True)
self.assertEqual(ismount(""), False)
self.assertEqual(ismount(":"), False)
self.assertEqual(ismount(b"a:"), True)
self.assertEqual(ismount(b"a:b"), False)
self.assertEqual(ismount(b"a:b:"), True)
self.assertEqual(ismount(b""), False)
self.assertEqual(ismount(b":"), False)
def test_normpath(self):
normpath = macpath.normpath
self.assertEqual(normpath("a:b"), "a:b")
self.assertEqual(normpath("a"), ":a")
self.assertEqual(normpath("a:b::c"), "a:c")
self.assertEqual(normpath("a:b:c:::d"), "a:d")
self.assertRaises(macpath.norm_error, normpath, "a::b")
self.assertRaises(macpath.norm_error, normpath, "a:b:::c")
self.assertEqual(normpath(":"), ":")
self.assertEqual(normpath("a:"), "a:")
self.assertEqual(normpath("a:b:"), "a:b")
self.assertEqual(normpath(b"a:b"), b"a:b")
self.assertEqual(normpath(b"a"), b":a")
self.assertEqual(normpath(b"a:b::c"), b"a:c")
self.assertEqual(normpath(b"a:b:c:::d"), b"a:d")
self.assertRaises(macpath.norm_error, normpath, b"a::b")
self.assertRaises(macpath.norm_error, normpath, b"a:b:::c")
self.assertEqual(normpath(b":"), b":")
self.assertEqual(normpath(b"a:"), b"a:")
self.assertEqual(normpath(b"a:b:"), b"a:b")
class MacCommonTest(test_genericpath.CommonTest, unittest.TestCase):
pathmodule = macpath
test_relpath_errors = None
if __name__ == "__main__":
unittest.main()
|
yotchang4s/cafebabepy
|
src/main/python/test/test_macpath.py
|
Python
|
bsd-3-clause
| 6,172
|
from __future__ import print_function, division
import random
from sympy import Derivative
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, as_int, range
from sympy.core.function import count_ops
from sympy.core.decorators import call_highest_priority
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.simplify import simplify as _simplify
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
from sympy.matrices.matrices import (MatrixBase,
ShapeError, a2idx, classof)
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class DenseMatrix(MatrixBase):
is_MatrixExpr = False
_op_priority = 10.01
_class_priority = 4
@call_highest_priority('__radd__')
def __add__(self, other):
return super(DenseMatrix, self).__add__(_force_mutable(other))
@call_highest_priority('__div__')
def __div__(self, other):
return super(DenseMatrix, self).__div__(_force_mutable(other))
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, Matrix):
return self._mat == other._mat
elif isinstance(other, MatrixBase):
return self._mat == Matrix(other)._mat
except AttributeError:
return False
def __getitem__(self, key):
"""Return portion of self defined by key. If the key involves a slice
then a list will be returned (if key is a single slice) or a matrix
(if key was a tuple involving a slice).
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix([
... [1, 2 + I],
... [3, 4 ]])
If the key is a tuple that doesn't involve a slice then that element
is returned:
>>> m[1, 0]
3
When a tuple key involves a slice, a matrix is returned. Here, the
first column is selected (all rows, column 0):
>>> m[:, 0]
Matrix([
[1],
[3]])
If the slice is not a tuple then it selects from the underlying
list of elements that are arranged in row order and a list is
returned if a slice is involved:
>>> m[0]
1
>>> m[::2]
[1, 3]
"""
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._mat[i*self.cols + j]
except (TypeError, IndexError):
if isinstance(i, slice):
# XXX remove list() when PY2 support is dropped
i = list(range(self.rows))[i]
elif is_sequence(i):
pass
else:
i = [i]
if isinstance(j, slice):
# XXX remove list() when PY2 support is dropped
j = list(range(self.cols))[j]
elif is_sequence(j):
pass
else:
j = [j]
return self.extract(i, j)
else:
# row-wise decomposition of matrix
if isinstance(key, slice):
return self._mat[key]
return self._mat[a2idx(key)]
@call_highest_priority('__rmul__')
def __matmul__(self, other):
return super(DenseMatrix, self).__mul__(_force_mutable(other))
@call_highest_priority('__rmul__')
def __mul__(self, other):
return super(DenseMatrix, self).__mul__(_force_mutable(other))
def __ne__(self, other):
return not self == other
@call_highest_priority('__rpow__')
def __pow__(self, other):
return super(DenseMatrix, self).__pow__(other)
@call_highest_priority('__add__')
def __radd__(self, other):
return super(DenseMatrix, self).__radd__(_force_mutable(other))
@call_highest_priority('__mul__')
def __rmatmul__(self, other):
return super(DenseMatrix, self).__rmul__(_force_mutable(other))
@call_highest_priority('__mul__')
def __rmul__(self, other):
return super(DenseMatrix, self).__rmul__(_force_mutable(other))
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
@call_highest_priority('__sub__')
def __rsub__(self, other):
return super(DenseMatrix, self).__rsub__(_force_mutable(other))
def __setitem__(self, key, value):
raise NotImplementedError()
@call_highest_priority('__rsub__')
def __sub__(self, other):
return super(DenseMatrix, self).__sub__(_force_mutable(other))
@call_highest_priority('__truediv__')
def __truediv__(self, other):
return super(DenseMatrix, self).__truediv__(_force_mutable(other))
def _cholesky(self):
"""Helper function of cholesky.
Without the error checks.
To be used privately. """
L = zeros(self.rows, self.rows)
for i in range(self.rows):
for j in range(i):
L[i, j] = (1 / L[j, j])*(self[i, j] -
sum(L[i, k]*L[j, k] for k in range(j)))
L[i, i] = sqrt(self[i, i] -
sum(L[i, k]**2 for k in range(i)))
return self._new(L)
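# Illustrative note (not from the original source): the recurrence above is the
# textbook Cholesky factorization A = L*L.T of a symmetric positive-definite
# matrix; the public entry point with error checking is ``cholesky()``.  A
# doctest-style sketch, kept as comments because this sits inside the class body:
#
#     >>> from sympy import Matrix
#     >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
#     >>> L = A.cholesky()
#     >>> L
#     Matrix([
#     [ 5, 0, 0],
#     [ 3, 3, 0],
#     [-1, 1, 3]])
#     >>> L * L.T == A
#     True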
def _diagonal_solve(self, rhs):
"""Helper function of function diagonal_solve,
without the error checks, to be used privately.
"""
return self._new(rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / self[i, i])
def _eval_adjoint(self):
return self.T.C
def _eval_conjugate(self):
"""By-element conjugation.
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
out = self._new(self.rows, self.cols,
lambda i, j: self[i, j].conjugate())
return out
def _eval_determinant(self):
return self.det()
def _eval_diff(self, *args, **kwargs):
if kwargs.pop("evaluate", True):
return self.diff(*args)
else:
return Derivative(self, *args, **kwargs)
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using the method indicated (default
is Gauss elimination).
kwargs
======
method : ('GE', 'LU', or 'ADJ')
iszerofunc
try_block_diag
Notes
=====
According to the ``method`` keyword, it calls the appropriate method:
GE .... inverse_GE(); default
LU .... inverse_LU()
ADJ ... inverse_ADJ()
According to the ``try_block_diag`` keyword, it will try to form block
diagonal matrices using the method get_diag_blocks(), invert these
individually, and then reconstruct the full inverse matrix.
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
pivoting. In difficult cases a custom zero detection function can
be provided by setting the ``iszerofunc`` argument to a function that
should return True if its argument is zero. The ADJ routine computes
the determinant and uses that to detect singular matrices in addition
to testing for zeros on the diagonal.
See Also
========
inverse_LU
inverse_GE
inverse_ADJ
"""
from sympy.matrices import diag
method = kwargs.get('method', 'GE')
iszerofunc = kwargs.get('iszerofunc', _iszero)
if kwargs.get('try_block_diag', False):
blocks = self.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return diag(*r)
M = self.as_mutable()
if method == "GE":
rv = M.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
rv = M.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
rv = M.inverse_ADJ(iszerofunc=iszerofunc)
else:
# make sure to add an invertibility check (as in inverse_LU)
# if a new method is added.
raise ValueError("Inversion method unrecognized")
return self._new(rv)
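# Illustrative note (not from the original source): the ``method`` keyword
# documented above is normally reached through the public ``inv()`` wrapper.
# A doctest-style sketch, kept as comments because this sits inside the class
# body; the 3x3 diagonal matrix is an assumption made only for the example:
#
#     >>> from sympy import Matrix
#     >>> M = Matrix([[2, 0, 0], [0, 3, 0], [0, 0, 5]])
#     >>> M.inv(method='GE') == M.inv(method='LU') == M.inv(method='ADJ')
#     True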
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = 0
for i in range(self.cols):
trace += self._mat[i*self.cols + i]
return trace
def _eval_transpose(self):
"""Matrix transposition.
Examples
========
>>> from sympy import Matrix, I
>>> m=Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m.transpose()
Matrix([
[ 1, 3],
[2 + I, 4]])
>>> m.T == m.transpose()
True
See Also
========
conjugate: By-element conjugation
"""
a = []
for i in range(self.cols):
a.extend(self._mat[i::self.cols])
return self._new(self.cols, self.rows, a)
def _LDLdecomposition(self):
"""Helper function of LDLdecomposition.
Without the error checks.
To be used privately.
"""
D = zeros(self.rows, self.rows)
L = eye(self.rows)
for i in range(self.rows):
for j in range(i):
L[i, j] = (1 / D[j, j])*(self[i, j] - sum(
L[i, k]*L[j, k]*D[k, k] for k in range(j)))
D[i, i] = self[i, i] - sum(L[i, k]**2*D[k, k]
for k in range(i))
return self._new(L), self._new(D)
def _lower_triangular_solve(self, rhs):
"""Helper function of function lower_triangular_solve.
Without the error checks.
To be used privately.
"""
X = zeros(self.rows, rhs.cols)
for j in range(rhs.cols):
for i in range(self.rows):
if self[i, i] == 0:
raise TypeError("Matrix must be non-singular.")
X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
for k in range(i))) / self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Helper function of function upper_triangular_solve.
Without the error checks, to be used privately. """
X = zeros(self.rows, rhs.cols)
for j in range(rhs.cols):
for i in reversed(range(self.rows)):
if self[i, i] == 0:
raise ValueError("Matrix must be non-singular.")
X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
for k in range(i + 1, self.rows))) / self[i, i]
return self._new(X)
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self._new(self.rows, self.cols, list(map(f, self._mat)))
return out
def as_immutable(self):
"""Returns an Immutable version of this Matrix
"""
from .immutable import ImmutableMatrix as cls
if self.rows and self.cols:
return cls._new(self.tolist())
return cls._new(self.rows, self.cols, [])
def as_mutable(self):
"""Returns a mutable version of this matrix
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return Matrix(self)
def col(self, j):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
col_op
col_swap
col_del
col_join
col_insert
"""
return self[:, j]
def equals(self, other, failing_expression=False):
"""Applies ``equals`` to corresponding elements of the matrices,
trying to prove that the elements are equivalent, returning True
if they are, False if any pair is not, and None (or the first
failing expression if failing_expression is True) if it cannot
be decided if the expressions are equivalent or not. This is, in
general, an expensive operation.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x
>>> from sympy import cos
>>> A = Matrix([x*(x - 1), 0])
>>> B = Matrix([x**2 - x, 0])
>>> A == B
False
>>> A.simplify() == B.simplify()
True
>>> A.equals(B)
True
>>> A.equals(2)
False
See Also
========
sympy.core.expr.equals
"""
try:
if self.shape != other.shape:
return False
rv = True
for i in range(self.rows):
for j in range(self.cols):
ans = self[i, j].equals(other[i, j], failing_expression)
if ans is False:
return False
elif ans is not True and rv is True:
rv = ans
return rv
except AttributeError:
return False
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
mat = [cls._sympify(0)]*n*n
mat[::n + 1] = [cls._sympify(1)]*n
return cls._new(n, n, mat)
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
for i in range(self.rows):
for j in range(i + 1, self.cols):
if self[i, j] or self[j, i]:
return False
return True
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self._mat[i*cols + j])
def row(self, i):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
row_op
row_swap
row_del
row_join
row_insert
"""
return self[i, :]
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return [self._mat[i: i + self.cols]
for i in range(0, len(self), self.cols)]
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls._new(r, c, [cls._sympify(0)]*r*c)
############################
# Mutable matrix operators #
############################
def _force_mutable(x):
"""Return a matrix as a Matrix, otherwise return x."""
if getattr(x, 'is_Matrix', False):
return x.as_mutable()
elif isinstance(x, Basic):
return x
elif hasattr(x, '__array__'):
a = x.__array__()
if len(a.shape) == 0:
return sympify(a)
return Matrix(x)
return x
class MutableDenseMatrix(DenseMatrix, MatrixBase):
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
@classmethod
def _new(cls, *args, **kwargs):
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
self = object.__new__(cls)
self.rows = rows
self.cols = cols
self._mat = list(flat_list) # create a shallow copy
return self
def __setitem__(self, key, value):
"""
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
self._mat[i*self.cols + j] = value
def as_mutable(self):
return self.copy()
def col_del(self, i):
"""Delete the given column.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_del(1)
>>> M
Matrix([
[1, 0],
[0, 0],
[0, 1]])
See Also
========
col
row_del
"""
if i < -self.cols or i >= self.cols:
raise IndexError("Index out of range: 'i=%s', valid -%s <= i < %s"
% (i, self.cols, self.cols))
for j in range(self.rows - 1, -1, -1):
del self._mat[i + j*self.cols]
self.cols -= 1
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i).
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[1, 2, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
col
row_op
"""
self._mat[j::self.cols] = [f(*t) for t in list(zip(self._mat[j::self.cols], list(range(self.rows))))]
def col_swap(self, i, j):
"""Swap the two given columns of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[1, 0], [1, 0]])
>>> M
Matrix([
[1, 0],
[1, 0]])
>>> M.col_swap(0, 1)
>>> M
Matrix([
[0, 1],
[0, 1]])
See Also
========
col
row_swap
"""
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def copyin_list(self, key, value):
"""Copy in elements from a list.
Parameters
==========
key : slice
The section of this matrix to replace.
value : iterable
The iterable to copy values from.
Examples
========
>>> from sympy.matrices import eye
>>> I = eye(3)
>>> I[:2, 0] = [1, 2] # col
>>> I
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
>>> I[1, :2] = [[3, 4]]
>>> I
Matrix([
[1, 0, 0],
[3, 4, 0],
[0, 0, 1]])
See Also
========
copyin_matrix
"""
if not is_sequence(value):
raise TypeError("`value` must be an ordered iterable, not %s." % type(value))
return self.copyin_matrix(key, Matrix(value))
def copyin_matrix(self, key, value):
"""Copy in values from a matrix into the given bounds.
Parameters
==========
key : slice
The section of this matrix to replace.
value : Matrix
The matrix to copy values from.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> M = Matrix([[0, 1], [2, 3], [4, 5]])
>>> I = eye(3)
>>> I[:3, :2] = M
>>> I
Matrix([
[0, 1, 0],
[2, 3, 0],
[4, 5, 1]])
>>> I[0, 1] = M
>>> I
Matrix([
[0, 0, 1],
[2, 2, 3],
[4, 4, 5]])
See Also
========
copyin_list
"""
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(filldedent("The Matrix `value` doesn't have the "
"same dimensions "
"as the in sub-Matrix given by `key`."))
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
def fill(self, value):
"""Fill the matrix with the scalar value.
See Also
========
zeros
ones
"""
self._mat = [value]*len(self)
def row_del(self, i):
"""Delete the given row.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_del(1)
>>> M
Matrix([
[1, 0, 0],
[0, 0, 1]])
See Also
========
row
col_del
"""
if i < -self.rows or i >= self.rows:
raise IndexError("Index out of range: 'i = %s', valid -%s <= i"
" < %s" % (i, self.rows, self.rows))
if i < 0:
i += self.rows
del self._mat[i*self.cols:(i+1)*self.cols]
self.rows -= 1
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
zip_row_op
col_op
"""
i0 = i*self.cols
ri = self._mat[i0: i0 + self.cols]
self._mat[i0: i0 + self.cols] = [ f(x, j) for x, j in zip(ri, list(range(self.cols))) ]
def row_swap(self, i, j):
"""Swap the two given rows of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[0, 1], [1, 0]])
>>> M
Matrix([
[0, 1],
[1, 0]])
>>> M.row_swap(0, 1)
>>> M
Matrix([
[1, 0],
[0, 1]])
See Also
========
row
col_swap
"""
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def simplify(self, ratio=1.7, measure=count_ops):
"""Applies simplify to the elements of a matrix in place.
This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure))
See Also
========
sympy.simplify.simplify.simplify
"""
for i in range(len(self._mat)):
self._mat[i] = _simplify(self._mat[i], ratio=ratio,
measure=measure)
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
row_op
col_op
"""
i0 = i*self.cols
k0 = k*self.cols
ri = self._mat[i0: i0 + self.cols]
rk = self._mat[k0: k0 + self.cols]
self._mat[i0: i0 + self.cols] = [ f(x, y) for x, y in zip(ri, rk) ]
# Utility functions
MutableMatrix = Matrix = MutableDenseMatrix
###########
# Numpy Utility Functions:
# list2numpy, matrix2numpy, symmarray, rot_axis[123]
###########
def list2numpy(l, dtype=object): # pragma: no cover
"""Converts python list of SymPy expressions to a NumPy array.
See Also
========
matrix2numpy
"""
from numpy import empty
a = empty(len(l), dtype)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m, dtype=object): # pragma: no cover
"""Converts SymPy's matrix to a NumPy array.
See Also
========
list2numpy
"""
from numpy import empty
a = empty(m.shape, dtype)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
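# Illustrative, hypothetical helper (not part of the original module): a round
# trip through the converter documented above -- a SymPy Matrix turned into a
# NumPy object array and rebuilt.  Requires numpy, just like matrix2numpy itself.
def _example_matrix2numpy():  # pragma: no cover
    m = Matrix([[1, 2], [3, 4]])
    a = matrix2numpy(m)              # 2x2 numpy array with dtype=object
    return Matrix(a.tolist()) == m   # True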
def rot_axis3(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 3-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis3
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis3(theta)
Matrix([
[ 1/2, sqrt(3)/2, 0],
[-sqrt(3)/2, 1/2, 0],
[ 0, 0, 1]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis3(pi/2)
Matrix([
[ 0, 1, 0],
[-1, 0, 0],
[ 0, 0, 1]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, st, 0),
(-st, ct, 0),
(0, 0, 1))
return Matrix(lil)
def rot_axis2(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 2-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis2
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis2(theta)
Matrix([
[ 1/2, 0, -sqrt(3)/2],
[ 0, 1, 0],
[sqrt(3)/2, 0, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis2(pi/2)
Matrix([
[0, 0, -1],
[0, 1, 0],
[1, 0, 0]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, 0, -st),
(0, 1, 0),
(st, 0, ct))
return Matrix(lil)
def rot_axis1(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 1-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis1
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis1(theta)
Matrix([
[1, 0, 0],
[0, 1/2, sqrt(3)/2],
[0, -sqrt(3)/2, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis1(pi/2)
Matrix([
[1, 0, 0],
[0, 0, 1],
[0, -1, 0]])
See Also
========
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((1, 0, 0),
(0, ct, st),
(0, -st, ct))
return Matrix(lil)
@doctest_depends_on(modules=('numpy',))
def symarray(prefix, shape, **kwargs): # pragma: no cover
"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named ``prefix_i1_i2_``... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as SymPy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
\*\*kwargs : dict
keyword arguments passed on to Symbol
Examples
========
These doctests require numpy.
>>> from sympy import symarray
>>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>>> a = symarray('', 3)
>>> b = symarray('', 3)
>>> a[0] == b[0]
True
>>> a = symarray('a', 3)
>>> b = symarray('b', 3)
>>> a[0] == b[0]
False
Creating symarrays with a prefix:
>>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>>> symarray('a', (2, 3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>>> symarray('a', (2, 3, 2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
For setting assumptions of the underlying Symbols:
>>> [s.is_real for s in symarray('a', 2, real=True)]
[True, True]
"""
from numpy import empty, ndindex
arr = empty(shape, dtype=object)
for index in ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))),
**kwargs)
return arr
###############
# Functions
###############
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
denoted as C(a, b, ..., z), do not vanish for n = 0.
Casoratian is defined by k x k determinant::
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis:
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
from .dense import Matrix
seqs = list(map(sympify, seqs))
if not zero:
f = lambda i, j: seqs[j].subs(n, n + i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
def eye(n, cls=None):
"""Create square identity matrix n x n
See Also
========
diag
zeros
ones
"""
if cls is None:
from sympy.matrices import Matrix as cls
return cls.eye(n)
def diag(*values, **kwargs):
"""Create a sparse, diagonal matrix from a list of diagonal values.
Notes
=====
When arguments are matrices they are fitted in resultant matrix.
The returned matrix is a mutable, dense matrix. To make it a different
type, send the desired class for keyword ``cls``.
Examples
========
>>> from sympy.matrices import diag, Matrix, ones
>>> diag(1, 2, 3)
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> diag(*[1, 2, 3])
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
The diagonal elements can be matrices; diagonal filling will
continue on the diagonal from the last element of the matrix:
>>> from sympy.abc import x, y, z
>>> a = Matrix([x, y, z])
>>> b = Matrix([[1, 2], [3, 4]])
>>> c = Matrix([[5, 6]])
>>> diag(a, 7, b, c)
Matrix([
[x, 0, 0, 0, 0, 0],
[y, 0, 0, 0, 0, 0],
[z, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0],
[0, 0, 3, 4, 0, 0],
[0, 0, 0, 0, 5, 6]])
When diagonal elements are lists, they will be treated as arguments
to Matrix:
>>> diag([1, 2, 3], 4)
Matrix([
[1, 0],
[2, 0],
[3, 0],
[0, 4]])
>>> diag([[1, 2, 3]], 4)
Matrix([
[1, 2, 3, 0],
[0, 0, 0, 4]])
A given band off the diagonal can be made by padding with a
vertical or horizontal "kerning" vector:
>>> hpad = ones(0, 2)
>>> vpad = ones(2, 0)
>>> diag(vpad, 1, 2, 3, hpad) + diag(hpad, 4, 5, 6, vpad)
Matrix([
[0, 0, 4, 0, 0],
[0, 0, 0, 5, 0],
[1, 0, 0, 0, 6],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0]])
The type returned is mutable by default, but an immutable (or other) matrix
class can be requested via the ``cls`` keyword:
>>> type(diag(1))
<class 'sympy.matrices.dense.MutableDenseMatrix'>
>>> from sympy.matrices import ImmutableMatrix
>>> type(diag(1, cls=ImmutableMatrix))
<class 'sympy.matrices.immutable.ImmutableMatrix'>
See Also
========
eye
"""
from .sparse import MutableSparseMatrix
cls = kwargs.pop('cls', None)
if cls is None:
from .dense import Matrix as cls
if kwargs:
raise ValueError('unrecognized keyword%s: %s' % (
's' if len(kwargs) > 1 else '',
', '.join(kwargs.keys())))
rows = 0
cols = 0
values = list(values)
for i in range(len(values)):
m = values[i]
if isinstance(m, MatrixBase):
rows += m.rows
cols += m.cols
elif is_sequence(m):
m = values[i] = Matrix(m)
rows += m.rows
cols += m.cols
else:
rows += 1
cols += 1
res = MutableSparseMatrix.zeros(rows, cols)
i_row = 0
i_col = 0
for m in values:
if isinstance(m, MatrixBase):
res[i_row:i_row + m.rows, i_col:i_col + m.cols] = m
i_row += m.rows
i_col += m.cols
else:
res[i_row, i_col] = m
i_row += 1
i_col += 1
return cls._new(res)
def GramSchmidt(vlist, orthonormal=False):
"""
Apply the Gram-Schmidt process to a set of vectors.
see: http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
"""
out = []
m = len(vlist)
for i in range(m):
tmp = vlist[i]
for j in range(i):
tmp -= vlist[i].project(out[j])
if not tmp.values():
raise ValueError(
"GramSchmidt: vector set not linearly independent")
out.append(tmp)
if orthonormal:
for i in range(len(out)):
out[i] = out[i].normalized()
return out
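# Illustrative, hypothetical helper (not part of the original module): exercises
# GramSchmidt above on two linearly independent column vectors; the vectors and
# the assertions are assumptions made only for this example.
def _example_gram_schmidt():  # pragma: no cover
    v = [Matrix([1, 0, 1]), Matrix([1, 1, 0])]
    ortho = GramSchmidt(v, orthonormal=True)
    # Orthonormal output: unit length and mutually orthogonal.
    assert (ortho[0].T * ortho[0])[0, 0] == 1
    assert (ortho[1].T * ortho[1])[0, 0] == 1
    assert (ortho[0].T * ortho[1])[0, 0] == 0
    return ortho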
def hessian(f, varlist, constraints=[]):
"""Compute Hessian matrix for a function f wrt parameters in varlist
which may be given as a sequence or a row/column vector. A list of
constraints may optionally be given.
Examples
========
>>> from sympy import Function, hessian, pprint
>>> from sympy.abc import x, y
>>> f = Function('f')(x, y)
>>> g1 = Function('g')(x, y)
>>> g2 = x**2 + 3*y
>>> pprint(hessian(f, (x, y), [g1, g2]))
[ d d ]
[ 0 0 --(g(x, y)) --(g(x, y)) ]
[ dx dy ]
[ ]
[ 0 0 2*x 3 ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 2*x ---(f(x, y)) -----(f(x, y))]
[dx 2 dy dx ]
[ dx ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 3 -----(f(x, y)) ---(f(x, y)) ]
[dy dy dx 2 ]
[ dy ]
References
==========
http://en.wikipedia.org/wiki/Hessian_matrix
See Also
========
sympy.matrices.mutable.Matrix.jacobian
wronskian
"""
# f is the expression representing a function f, return regular matrix
if isinstance(varlist, MatrixBase):
if 1 not in varlist.shape:
raise ShapeError("`varlist` must be a column or row vector.")
if varlist.cols == 1:
varlist = varlist.T
varlist = varlist.tolist()[0]
if is_sequence(varlist):
n = len(varlist)
if not n:
raise ShapeError("`len(varlist)` must not be zero.")
else:
raise ValueError("Improper variable list in hessian function")
if not getattr(f, 'diff', None):
# check differentiability
raise ValueError("Function `f` (%s) is not differentiable" % f)
m = len(constraints)
N = m + n
out = zeros(N)
for k, g in enumerate(constraints):
if not getattr(g, 'diff', None):
# check differentiability
raise ValueError("Constraint `g` (%s) is not differentiable" % g)
for i in range(n):
out[k, i + m] = g.diff(varlist[i])
for i in range(n):
for j in range(i, n):
out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])
for i in range(N):
for j in range(i + 1, N):
out[j, i] = out[i, j]
return out
def jordan_cell(eigenval, n):
"""
Create matrix of Jordan cell kind:
Examples
========
>>> from sympy.matrices import jordan_cell
>>> from sympy.abc import x
>>> jordan_cell(x, 4)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
"""
n = as_int(n)
out = zeros(n)
for i in range(n - 1):
out[i, i] = eigenval
out[i, i + 1] = S.One
out[n - 1, n - 1] = eigenval
return out
def matrix_multiply_elementwise(A, B):
"""Return the Hadamard product (elementwise product) of A and B
>>> from sympy.matrices import matrix_multiply_elementwise
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> matrix_multiply_elementwise(A, B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
__mul__
"""
if A.shape != B.shape:
raise ShapeError()
shape = A.shape
return classof(A, B)._new(shape[0], shape[1],
lambda i, j: A[i, j]*B[i, j])
def ones(r, c=None):
"""Returns a matrix of ones with ``r`` rows and ``c`` columns;
if ``c`` is omitted a square matrix will be returned.
See Also
========
zeros
eye
diag
"""
from .dense import Matrix
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return Matrix(r, c, [S.One]*r*c)
def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False,
percent=100, prng=None):
"""Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted
the matrix will be square. If ``symmetric`` is True the matrix must be
square. If ``percent`` is less than 100 then only approximately the given
percentage of elements will be non-zero.
The pseudo-random number generator used to generate the matrix is chosen in the
following way.
* If ``prng`` is supplied, it will be used as the random number generator.
It should be an instance of :class:`random.Random`, or at least have
``randint`` and ``shuffle`` methods with the same signatures.
* if ``prng`` is not supplied but ``seed`` is supplied, then new
:class:`random.Random` with given ``seed`` will be created;
* otherwise, a new :class:`random.Random` with default seed will be used.
Examples
========
>>> from sympy.matrices import randMatrix
>>> randMatrix(3) # doctest:+SKIP
[25, 45, 27]
[44, 54, 9]
[23, 96, 46]
>>> randMatrix(3, 2) # doctest:+SKIP
[87, 29]
[23, 37]
[90, 26]
>>> randMatrix(3, 3, 0, 2) # doctest:+SKIP
[0, 2, 0]
[2, 0, 1]
[0, 0, 1]
>>> randMatrix(3, symmetric=True) # doctest:+SKIP
[85, 26, 29]
[26, 71, 43]
[29, 43, 57]
>>> A = randMatrix(3, seed=1)
>>> B = randMatrix(3, seed=2)
>>> A == B # doctest:+SKIP
False
>>> A == randMatrix(3, seed=1)
True
>>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP
[0, 68, 43]
[0, 68, 0]
[0, 91, 34]
"""
if c is None:
c = r
# Note that ``Random()`` is equivalent to ``Random(None)``
prng = prng or random.Random(seed)
if symmetric and r != c:
raise ValueError(
'For symmetric matrices, r must equal c, but %i != %i' % (r, c))
if not symmetric:
m = Matrix._new(r, c, lambda i, j: prng.randint(min, max))
else:
m = zeros(r)
for i in range(r):
for j in range(i, r):
m[i, j] = prng.randint(min, max)
for i in range(r):
for j in range(i):
m[i, j] = m[j, i]
if percent == 100:
return m
else:
z = int(r*c*(100 - percent) // 100)
m._mat[:z] = [S.Zero]*z
prng.shuffle(m._mat)
return m
def wronskian(functions, var, method='bareis'):
"""
Compute the Wronskian of a list of functions
::
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1, ..., fn) = | . . . . |
| . . . . |
| (n) (n) (n) |
| D (f1) D (f2) ... D (fn) |
see: http://en.wikipedia.org/wiki/Wronskian
See Also
========
sympy.matrices.mutable.Matrix.jacobian
hessian
"""
from .dense import Matrix
for index in range(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return 1
W = Matrix(n, n, lambda i, j: functions[i].diff(var, j))
return W.det(method)
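# Illustrative, hypothetical helper (not part of the original module): a tiny
# check of wronskian() above.  x and x**2 are linearly independent, so the
# Wronskian |x 1; x**2 2*x| = x*2*x - 1*x**2 = x**2 is non-zero.
def _example_wronskian():  # pragma: no cover
    x = Symbol('x')
    return wronskian([x, x**2], x)   # x**2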
def zeros(r, c=None, cls=None):
"""Returns a matrix of zeros with ``r`` rows and ``c`` columns;
if ``c`` is omitted a square matrix will be returned.
See Also
========
ones
eye
diag
"""
if cls is None:
from .dense import Matrix as cls
return cls.zeros(r, c)
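# Illustrative, hypothetical self-check (not part of the original module): a
# tiny demonstration of the module-level constructors defined above; it runs
# only when this file is executed directly.
if __name__ == "__main__":  # pragma: no cover
    assert eye(3) * ones(3, 1) == ones(3, 1)
    assert zeros(2, 3).shape == (2, 3)
    assert diag(1, 2) == Matrix([[1, 0], [0, 2]])
    print("dense.py examples OK")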
|
aktech/sympy
|
sympy/matrices/dense.py
|
Python
|
bsd-3-clause
| 44,552
|
#Operation 3067
#A game that involves exploration, building, and attack.
#Written by vanquished
#Licensed under the GPL v3 licence.
VERSION = "0.1"
try:
import pygame, sys
from pygame.locals import *
except ImportError, err:
print "Could not load module. {0}".format(err)
def main():
#Initialise screen
pygame.init()
screen = pygame.display.set_mode((800, 540))
pygame.display.set_caption("Operation 3067 version "+ VERSION)
#Background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((1, 136, 186))
# Rest of code down here
screen.blit(background, (0,0))
pygame.display.flip()
#The main loop
while True:
#events
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#state
#draw on screen
if __name__ == "__main__":
main()
|
vanquished/operation-3067
|
operation-3067.py
|
Python
|
gpl-3.0
| 993
|
import unittest
from the_ark.field_handlers import FieldHandler, FieldHandlerException, SeleniumError, MissingKey, UnknownFieldType
from the_ark import selenium_helpers
from mock import patch
class FieldHandlerTestCase(unittest.TestCase):
def setUp(self):
self.instantiate_field_handler()
@patch("the_ark.selenium_helpers.SeleniumHelpers")
def instantiate_field_handler(self, selenium_helper):
fake_driver = "driver"
self.sh = selenium_helper(fake_driver)
self.fh = FieldHandler(self.sh)
# ===================================================================
# --- dispatch_field() tests
# ===================================================================
# - Text Field
@patch("the_ark.field_handlers.FieldHandler.handle_text")
def test_dispatch_text_field_without_confirm(self, text_method):
field_data = {
"type": "STRING",
"css_selector": "#field",
"input": "text"
}
text_method.return_value = "text"
self.fh.dispatch_field(field_data)
text_method.assert_called_once_with(field_data["css_selector"], field_data["input"], None)
@patch("the_ark.field_handlers.FieldHandler.handle_text")
def test_dispatch_text_field_with_confirm(self, text_method):
field_data = {
"type": "EMAIL",
"css_selector": "#field",
"input": "text",
"confirm_css_selector": "#confirm-field"
}
text_method.return_value = "text"
self.fh.dispatch_field(field_data)
text_method.assert_called_once_with(field_data["css_selector"],
field_data["input"],
field_data["confirm_css_selector"])
# - Check Box Field
@patch("the_ark.field_handlers.FieldHandler.handle_check_box")
def test_dispatch_check_box_field(self, check_box_method):
field_data = {
"type": "CHECK_BOX",
"enum": [{"#agree": "Agree"}, {"#disagree": "Disagree"}],
"input": [1]
}
check_box_method.return_value = "text"
self.sh.fill_an_element.return_value = "text"
self.fh.dispatch_field(field_data)
check_box_method.assert_called_once_with(field_data["enum"], field_data["input"])
# - Radio Button Field
@patch("the_ark.field_handlers.FieldHandler.handle_radio_button")
def test_dispatch_radio_button_field(self, radio_button_method):
field_data = {
"type": "RADIO",
"enum": [{"#agree": "Agree"}, {"#disagree": "Disagree"}],
"input": 1
}
radio_button_method.return_value = "text"
self.fh.dispatch_field(field_data)
radio_button_method.assert_called_once_with(field_data["enum"], field_data["input"])
# - Select Field
@patch("the_ark.field_handlers.FieldHandler.handle_select")
def test_dispatch_select_field_without_first_valid(self, select_method):
field_data = {
"type": "SELECT",
"css_selector": "#state",
"enum": ["Agree", "Disagree"],
"input": 1
}
select_method.return_value = "text"
self.fh.dispatch_field(field_data)
select_method.assert_called_once_with(field_data["css_selector"], field_data["input"], False)
@patch("the_ark.field_handlers.FieldHandler.handle_select")
def test_dispatch_select_field_with_first_valid_as_true(self, select_method):
field_data = {
"type": "SELECT",
"css_selector": "#state",
"enum": ["Agree", "Disagree"],
"input": 30,
"first_valid": True
}
select_method.return_value = "text"
self.fh.dispatch_field(field_data)
select_method.assert_called_once_with(field_data["css_selector"],
field_data["input"],
field_data["first_valid"])
@patch("the_ark.field_handlers.FieldHandler.handle_select")
def test_dispatch_select_field_with_first_valid_as_false(self, select_method):
field_data = {
"type": "SELECT",
"css_selector": "#state",
"enum": ["Agree", "Disagree"],
"input": 30,
"first_valid": False
}
select_method.return_value = "text"
self.fh.dispatch_field(field_data)
select_method.assert_called_once_with(field_data["css_selector"],
field_data["input"],
field_data["first_valid"])
# - Drop Down Field
@patch("the_ark.field_handlers.FieldHandler.handle_drop_down")
def test_dispatch_drop_down_field(self, drop_down_method):
field_data = {
"type": "DROP_DOWN",
"css_selector": "#food",
"enum": [{"#pizza": "Pizza"}, {"#applesauce": "Applesauce"}],
"input": [0, 1]
}
drop_down_method.return_value = "text"
self.fh.dispatch_field(field_data)
drop_down_method.assert_called_once_with(field_data["css_selector"], field_data["enum"], field_data["input"])
# - Button Field
@patch("the_ark.field_handlers.FieldHandler.handle_button")
def test_dispatch_button_field(self, button_method):
field_data = {
"type": "BUTTON",
"css_selector": "#food"
}
button_method.return_value = "text"
self.fh.dispatch_field(field_data)
button_method.assert_called_once_with(field_data["css_selector"])
# - Exceptions
# Unknown Type
def test_dispatch_with_unknown_field_type(self):
field_data = {
"type": "Unavailable",
}
with self.assertRaises(UnknownFieldType) as error_message:
self.fh.dispatch_field(field_data)
# Check that the else statement is called by verifying the exception text contains the word "unknown"
self.assertIn(field_data["type"], error_message.exception.msg)
# FieldHandlerException()
@patch("the_ark.field_handlers.FieldHandler.handle_text")
def test_dispatch_field_handler_exception_without_name(self, text_method):
field_data = {
"type": "STRING",
"css_selector": "#field",
"input": "text"
}
text_method.side_effect = FieldHandlerException("Boo!")
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.dispatch_field(field_data)
self.assertNotIn("named", error_message.exception.msg)
@patch("the_ark.field_handlers.FieldHandler.handle_text")
def test_dispatch_field_handler_exception_with_name(self, text_method):
field_data = {
"type": "STRING",
"css_selector": "#field",
"input": "text",
"name": "First Name"
}
text_method.side_effect = FieldHandlerException("Boo!")
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.dispatch_field(field_data)
self.assertIn(field_data["name"], error_message.exception.msg)
# KeyError()
def test_dispatch_key_error_without_name(self):
field_data = {
"css_selector": "#field",
"input": "text",
}
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.dispatch_field(field_data)
self.assertEquals("'type'", error_message.exception.details["missing_key"])
self.assertNotIn("named", error_message.exception.msg)
def test_dispatch_key_error_with_name(self):
field_data = {
"css_selector": "#field",
"input": "text",
"name": "First Name"
}
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.dispatch_field(field_data)
self.assertIn(field_data["name"], error_message.exception.msg)
self.assertEquals("'type'", error_message.exception.key)
# General Exception
@patch("the_ark.field_handlers.FieldHandler.handle_text")
def test_dispatch_general_exception_without_name(self, text_method):
field_data = {
"type": "STRING",
"css_selector": "#field",
"input": "text"
}
text_method.side_effect = Exception("Boo!")
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.dispatch_field(field_data)
self.assertIn("Unhandled", error_message.exception.msg)
@patch("the_ark.field_handlers.FieldHandler.handle_text")
def test_dispatch_general_exception_with_name(self, text_method):
field_data = {
"type": "STRING",
"css_selector": "#field",
"input": "text",
"name": "First Name"
}
text_method.side_effect = Exception("Boo!")
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.dispatch_field(field_data)
self.assertIn(field_data["name"], error_message.exception.msg)
# ===================================================================
# --- Field Handler methods
# ===================================================================
# - Handle Text
def test_handle_text_without_confirm(self):
self.fh.handle_text("selector", "input text")
self.sh.fill_an_element.assert_called_once_with(css_selector="selector", fill_text="input text")
def test_handle_text_with_confirm(self):
css_selector = "selector"
input_text = "input text"
confirm_css_selector = "confirm"
self.fh.handle_text(css_selector, input_text, confirm_css_selector)
self.sh.fill_an_element.assert_called_with(css_selector=confirm_css_selector, fill_text=input_text)
def test_handle_text_selenium_exception(self):
self.sh.fill_an_element.side_effect = selenium_helpers.SeleniumHelperExceptions("message text",
"stacktrace",
"www.google.com")
with self.assertRaises(SeleniumError):
self.fh.handle_text("selector", "input text")
def test_handle_text_general_exception(self):
self.sh.fill_an_element.side_effect = Exception("Boo!")
with self.assertRaises(FieldHandlerException):
self.fh.handle_text("selector", "input text")
# - Handle Check Box
def test_handle_check_box(self):
enum = [{"css_selector": "selector.1"}, {"css_selector": "selector.2"}]
self.fh.handle_check_box(enum, [0, 1])
self.sh.click_an_element.assert_called_with(css_selector=enum[1]["css_selector"])
def test_handle_check_box_key_error(self):
enum = [{"bad_key": "selector.1"}, {"css_selector": "selector.2"}]
with self.assertRaises(MissingKey):
self.fh.handle_check_box(enum, [0, 1])
def test_handle_check_box_selenium_exception(self):
enum = [{"css_selector": "selector.1"}, {"css_selector": "selector.2"}]
self.sh.click_an_element.side_effect = selenium_helpers.SeleniumHelperExceptions("message text",
"stacktrace",
"www.google.com")
with self.assertRaises(SeleniumError):
self.fh.handle_check_box(enum, [0, 1])
def test_handle_check_box_general_exception(self):
css_selector = "selenium"
input_text = "input text"
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.handle_check_box(css_selector, input_text)
self.assertIn("Unhandled", error_message.exception.msg)
# - Handle Radio Button
def test_handle_radio_button(self):
enum = [{"css_selector": "selector.1"}, {"css_selector": "selector.2"}]
self.fh.handle_radio_button(enum, 1)
self.sh.click_an_element.assert_called_with(css_selector=enum[1]["css_selector"])
def test_handle_radio_button_key_error(self):
enum = [{"bad_key": "selector.1"}, {"css_selector": "selector.1"}]
with self.assertRaises(MissingKey):
self.fh.handle_radio_button(enum, 0)
def test_handle_radio_button_selenium_exception(self):
enum = [{"css_selector": "selector.1"}, {"css_selector": "selector.1"}]
self.sh.click_an_element.side_effect = selenium_helpers.SeleniumHelperExceptions("message text",
"stacktrace",
"www.google.com")
with self.assertRaises(SeleniumError):
self.fh.handle_radio_button(enum, 1)
def test_handle_radio_button_general_exception(self):
css_selector = "select.1"
input_text = "input text"
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.handle_radio_button(css_selector, input_text)
self.assertIn("Unhandled", error_message.exception.msg)
# - Handle Select
def test_handle_select_with_first_invalid(self):
selector = "select.1"
self.fh.handle_select(selector, 1)
self.sh.click_an_element.assert_called_once_with(css_selector="{0} option:nth-child({1})".format(selector, 3))
def test_handle_select_with_first_valid(self):
selector = "select.1"
self.fh.handle_select(selector, 1, True)
self.sh.click_an_element.assert_called_once_with(css_selector="{0} option:nth-child({1})".format(selector, 2))
def test_handle_select_selenium_exception(self):
self.sh.click_an_element.side_effect = selenium_helpers.SeleniumHelperExceptions("message text",
"stacktrace",
"www.google.com")
with self.assertRaises(SeleniumError):
self.fh.handle_select("select.1", 1)
def test_handle_select_general_exception(self):
self.sh.click_an_element.side_effect = Exception("Boo!")
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.handle_select("select.1", 1)
self.assertIn("Unhandled", error_message.exception.msg)
# - Handle Drop Down
def test_handle_drop_down(self):
css_selector = "selector.1"
enum = [{"css_selector": "selector.1"}, {"css_selector": "selector.2"}]
self.fh.handle_drop_down(css_selector, enum, 1)
self.sh.click_an_element.assert_called_with(css_selector=enum[1]["css_selector"])
def test_handle_drop_down_key_error(self):
css_selector = "selector.1"
enum = [{"bad_key": "selector.1"}, {"css_selector": "selector.1"}]
with self.assertRaises(MissingKey):
self.fh.handle_drop_down(css_selector, enum, 0)
def test_handle_drop_down_selenium_exception(self):
css_selector = "selector.1"
enum = [{"css_selector": "selector.1"}, {"css_selector": "selector.1"}]
self.sh.click_an_element.side_effect = selenium_helpers.SeleniumHelperExceptions("message text",
"stacktrace",
"www.google.com")
with self.assertRaises(SeleniumError):
self.fh.handle_drop_down(css_selector, enum, 1)
def test_handle_drop_down_general_exception(self):
css_selector = "select.1"
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.handle_drop_down(css_selector, "break!", "break!")
self.assertIn("Unhandled", error_message.exception.msg)
# - Handle Button
def test_handle_button(self):
css_selector = "selector.1"
self.fh.handle_button(css_selector)
self.sh.click_an_element.assert_called_with(css_selector=css_selector)
def test_handle_button_selenium_exception(self):
css_selector = "selector.1"
self.sh.click_an_element.side_effect = selenium_helpers.SeleniumHelperExceptions("message text",
"stacktrace",
"www.google.com")
with self.assertRaises(SeleniumError):
self.fh.handle_button(css_selector)
def test_handle_button_general_exception(self):
css_selector = "select.1"
self.sh.click_an_element.side_effect = Exception("Boo!")
with self.assertRaises(FieldHandlerException) as error_message:
self.fh.handle_button(css_selector)
self.assertIn("Unhandled", error_message.exception.msg)
# ===================================================================
# --- Field Handler Exception
# ===================================================================
def test_field_handler_exception_to_string_without_details(self):
field_handler = FieldHandlerException("Message text")
error_string = field_handler.__str__()
self.assertNotIn("stacktrace", error_string)
def test_field_handler_exception_to_string_with_details(self):
field_handler = FieldHandlerException("message",
"stacktrace:\nLine 1\nLine 2",
{"css_selector": "selector.1"})
error_string = field_handler.__str__()
self.assertIn("css_selector", error_string)
self.assertIn("stacktrace", error_string)
|
meltmedia/the-ark
|
tests/test_field_handler.py
|
Python
|
apache-2.0
| 17,996
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpconnect Trace TCP connect()s.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpconnect [-h] [-c] [-t] [-p PID] [-P PORT[,PORT...]] [-L] [-U] [-u UID] [-4 | -6] [-d]
#
# All connection attempts are traced, even if they ultimately fail.
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 25-Sep-2015 Brendan Gregg Created this.
# 14-Feb-2016 " " Switch to bpf_perf_output.
# 09-Jan-2019 Takuma Kume Support filtering by UID
# 30-Jul-2019 Xiaozhou Liu Count connects.
# 07-Oct-2020 Nabil Schear Correlate connects with DNS responses
# 08-Mar-2021 Suresh Kumar Added LPORT option
from __future__ import print_function
from bcc import BPF
from bcc.containers import filter_by_containers
from bcc.utils import printb
import argparse
from socket import inet_ntop, ntohs, AF_INET, AF_INET6
from struct import pack
from time import sleep
from datetime import datetime
# arguments
examples = """examples:
./tcpconnect # trace all TCP connect()s
./tcpconnect -t # include timestamps
./tcpconnect -d # include DNS queries associated with connects
./tcpconnect -p 181 # only trace PID 181
./tcpconnect -P 80 # only trace port 80
./tcpconnect -P 80,81 # only trace port 80 and 81
./tcpconnect -4 # only trace IPv4 family
./tcpconnect -6 # only trace IPv6 family
./tcpconnect -U # include UID
./tcpconnect -u 1000 # only trace UID 1000
./tcpconnect -c # count connects per src ip and dest ip/port
./tcpconnect -L # include LPORT while printing outputs
./tcpconnect --cgroupmap mappath # only trace cgroups in this BPF map
./tcpconnect --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace TCP connects",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-P", "--port",
help="comma-separated list of destination ports to trace.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-4", "--ipv4", action="store_true",
help="trace IPv4 family only")
group.add_argument("-6", "--ipv6", action="store_true",
help="trace IPv6 family only")
parser.add_argument("-L", "--lport", action="store_true",
help="include LPORT on output")
parser.add_argument("-U", "--print-uid", action="store_true",
help="include UID on output")
parser.add_argument("-u", "--uid",
help="trace this UID only")
parser.add_argument("-c", "--count", action="store_true",
help="count connects per src ip and dest ip/port")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("-d", "--dns", action="store_true",
help="include likely DNS query associated with each connect")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
BPF_HASH(currsock, u32, struct sock *);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 uid;
u32 saddr;
u32 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
u32 uid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
// separate flow keys per address family
struct ipv4_flow_key_t {
u32 saddr;
u32 daddr;
u16 dport;
};
BPF_HASH(ipv4_count, struct ipv4_flow_key_t);
struct ipv6_flow_key_t {
unsigned __int128 saddr;
unsigned __int128 daddr;
u16 dport;
};
BPF_HASH(ipv6_count, struct ipv6_flow_key_t);
int trace_connect_entry(struct pt_regs *ctx, struct sock *sk)
{
if (container_should_be_filtered()) {
return 0;
}
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = pid_tgid;
FILTER_PID
u32 uid = bpf_get_current_uid_gid();
FILTER_UID
// stash the sock ptr for lookup on return
currsock.update(&tid, &sk);
return 0;
};
static int trace_connect_return(struct pt_regs *ctx, short ipver)
{
int ret = PT_REGS_RC(ctx);
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = pid_tgid;
struct sock **skpp;
skpp = currsock.lookup(&tid);
if (skpp == 0) {
return 0; // missed entry
}
if (ret != 0) {
        // failed to send SYN packet, may not have populated
// socket __sk_common.{skc_rcv_saddr, ...}
currsock.delete(&tid);
return 0;
}
// pull in details
struct sock *skp = *skpp;
u16 lport = skp->__sk_common.skc_num;
u16 dport = skp->__sk_common.skc_dport;
FILTER_PORT
FILTER_FAMILY
if (ipver == 4) {
IPV4_CODE
} else /* 6 */ {
IPV6_CODE
}
currsock.delete(&tid);
return 0;
}
int trace_connect_v4_return(struct pt_regs *ctx)
{
return trace_connect_return(ctx, 4);
}
int trace_connect_v6_return(struct pt_regs *ctx)
{
return trace_connect_return(ctx, 6);
}
"""
struct_init = {'ipv4':
{'count':
"""
struct ipv4_flow_key_t flow_key = {};
flow_key.saddr = skp->__sk_common.skc_rcv_saddr;
flow_key.daddr = skp->__sk_common.skc_daddr;
flow_key.dport = ntohs(dport);
ipv4_count.increment(flow_key);""",
'trace':
"""
struct ipv4_data_t data4 = {.pid = pid, .ip = ipver};
data4.uid = bpf_get_current_uid_gid();
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = skp->__sk_common.skc_rcv_saddr;
data4.daddr = skp->__sk_common.skc_daddr;
data4.lport = lport;
data4.dport = ntohs(dport);
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));"""
},
'ipv6':
{'count':
"""
struct ipv6_flow_key_t flow_key = {};
bpf_probe_read_kernel(&flow_key.saddr, sizeof(flow_key.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&flow_key.daddr, sizeof(flow_key.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
flow_key.dport = ntohs(dport);
ipv6_count.increment(flow_key);""",
'trace':
"""
struct ipv6_data_t data6 = {.pid = pid, .ip = ipver};
data6.uid = bpf_get_current_uid_gid();
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.lport = lport;
data6.dport = ntohs(dport);
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));"""
}
}
# This defines an additional BPF program that instruments udp_recvmsg system
# call to locate DNS response packets on UDP port 53. When these packets are
# located, the data is copied to user-space where python will parse them with
# dnslib.
#
# uses a percpu array of length 1 to store the dns_data_t off the stack to
# allow for a maximum DNS packet length of 512 bytes.
dns_bpf_text = """
#include <net/inet_sock.h>
#define MAX_PKT 512
struct dns_data_t {
u8 pkt[MAX_PKT];
};
BPF_PERF_OUTPUT(dns_events);
// store msghdr pointer captured on syscall entry to parse on syscall return
BPF_HASH(tbl_udp_msg_hdr, u64, struct msghdr *);
// single element per-cpu array to hold the current event off the stack
BPF_PERCPU_ARRAY(dns_data,struct dns_data_t,1);
int trace_udp_recvmsg(struct pt_regs *ctx)
{
__u64 pid_tgid = bpf_get_current_pid_tgid();
struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
struct inet_sock *is = inet_sk(sk);
// only grab port 53 packets, 13568 is ntohs(53)
if (is->inet_dport == 13568) {
struct msghdr *msghdr = (struct msghdr *)PT_REGS_PARM2(ctx);
tbl_udp_msg_hdr.update(&pid_tgid, &msghdr);
}
return 0;
}
int trace_udp_ret_recvmsg(struct pt_regs *ctx)
{
__u64 pid_tgid = bpf_get_current_pid_tgid();
u32 zero = 0;
struct msghdr **msgpp = tbl_udp_msg_hdr.lookup(&pid_tgid);
if (msgpp == 0)
return 0;
struct msghdr *msghdr = (struct msghdr *)*msgpp;
if (msghdr->msg_iter.type != ITER_IOVEC)
goto delete_and_return;
int copied = (int)PT_REGS_RC(ctx);
if (copied < 0)
goto delete_and_return;
size_t buflen = (size_t)copied;
if (buflen > msghdr->msg_iter.iov->iov_len)
goto delete_and_return;
if (buflen > MAX_PKT)
buflen = MAX_PKT;
struct dns_data_t *data = dns_data.lookup(&zero);
if (!data) // this should never happen, just making the verifier happy
return 0;
void *iovbase = msghdr->msg_iter.iov->iov_base;
bpf_probe_read(data->pkt, buflen, iovbase);
dns_events.perf_submit(ctx, data, buflen);
delete_and_return:
tbl_udp_msg_hdr.delete(&pid_tgid);
return 0;
}
"""
if args.count and args.dns:
print("Error: you may not specify -d/--dns with -c/--count.")
exit()
# code substitutions
if args.count:
bpf_text = bpf_text.replace("IPV4_CODE", struct_init['ipv4']['count'])
bpf_text = bpf_text.replace("IPV6_CODE", struct_init['ipv6']['count'])
else:
bpf_text = bpf_text.replace("IPV4_CODE", struct_init['ipv4']['trace'])
bpf_text = bpf_text.replace("IPV6_CODE", struct_init['ipv6']['trace'])
if args.pid:
bpf_text = bpf_text.replace('FILTER_PID',
'if (pid != %s) { return 0; }' % args.pid)
if args.port:
dports = [int(dport) for dport in args.port.split(',')]
dports_if = ' && '.join(['dport != %d' % ntohs(dport) for dport in dports])
bpf_text = bpf_text.replace('FILTER_PORT',
'if (%s) { currsock.delete(&tid); return 0; }' % dports_if)
if args.ipv4:
bpf_text = bpf_text.replace('FILTER_FAMILY',
'if (ipver != 4) { return 0; }')
elif args.ipv6:
bpf_text = bpf_text.replace('FILTER_FAMILY',
'if (ipver != 6) { return 0; }')
if args.uid:
bpf_text = bpf_text.replace('FILTER_UID',
'if (uid != %s) { return 0; }' % args.uid)
bpf_text = filter_by_containers(args) + bpf_text
bpf_text = bpf_text.replace('FILTER_PID', '')
bpf_text = bpf_text.replace('FILTER_PORT', '')
bpf_text = bpf_text.replace('FILTER_FAMILY', '')
bpf_text = bpf_text.replace('FILTER_UID', '')
if args.dns:
bpf_text += dns_bpf_text
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# process event
def print_ipv4_event(cpu, data, size):
event = b["ipv4_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
if args.print_uid:
printb(b"%-6d" % event.uid, nl="")
dest_ip = inet_ntop(AF_INET, pack("I", event.daddr)).encode()
if args.lport:
printb(b"%-6d %-12.12s %-2d %-16s %-6d %-16s %-6d %s" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET, pack("I", event.saddr)).encode(), event.lport,
dest_ip, event.dport, print_dns(dest_ip)))
else:
printb(b"%-6d %-12.12s %-2d %-16s %-16s %-6d %s" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET, pack("I", event.saddr)).encode(),
dest_ip, event.dport, print_dns(dest_ip)))
def print_ipv6_event(cpu, data, size):
event = b["ipv6_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
if args.print_uid:
printb(b"%-6d" % event.uid, nl="")
dest_ip = inet_ntop(AF_INET6, event.daddr).encode()
if args.lport:
printb(b"%-6d %-12.12s %-2d %-16s %-6d %-16s %-6d %s" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET6, event.saddr).encode(), event.lport,
dest_ip, event.dport, print_dns(dest_ip)))
else:
printb(b"%-6d %-12.12s %-2d %-16s %-16s %-6d %s" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET6, event.saddr).encode(),
dest_ip, event.dport, print_dns(dest_ip)))
def depict_cnt(counts_tab, l3prot='ipv4'):
for k, v in sorted(counts_tab.items(),
key=lambda counts: counts[1].value, reverse=True):
depict_key = ""
if l3prot == 'ipv4':
depict_key = "%-25s %-25s %-20s" % \
((inet_ntop(AF_INET, pack('I', k.saddr))),
inet_ntop(AF_INET, pack('I', k.daddr)), k.dport)
else:
depict_key = "%-25s %-25s %-20s" % \
((inet_ntop(AF_INET6, k.saddr)),
inet_ntop(AF_INET6, k.daddr), k.dport)
print("%s %-10d" % (depict_key, v.value))
def print_dns(dest_ip):
if not args.dns:
return b""
dnsname, timestamp = dns_cache.get(dest_ip, (None, None))
if timestamp is not None:
diff = datetime.now() - timestamp
diff = float(diff.seconds) * 1000 + float(diff.microseconds) / 1000
else:
diff = 0
if dnsname is None:
dnsname = b"No DNS Query"
if dest_ip == b"127.0.0.1" or dest_ip == b"::1":
dnsname = b"localhost"
retval = b"%s" % dnsname
if diff > DELAY_DNS:
retval += b" (%.3fms)" % diff
return retval
if args.dns:
try:
import dnslib
from cachetools import TTLCache
except ImportError:
print("Error: The python packages dnslib and cachetools are required "
"to use the -d/--dns option.")
print("Install this package with:")
print("\t$ pip3 install dnslib cachetools")
print(" or")
print("\t$ sudo apt-get install python3-dnslib python3-cachetools "
"(on Ubuntu 18.04+)")
exit(1)
# 24 hours
DEFAULT_TTL = 86400
# Cache Size in entries
DNS_CACHE_SIZE = 10240
# delay in ms in which to warn users of long delay between the query
# and the connect that used the IP
DELAY_DNS = 100
dns_cache = TTLCache(maxsize=DNS_CACHE_SIZE, ttl=DEFAULT_TTL)
# process event
def save_dns(cpu, data, size):
event = b["dns_events"].event(data)
payload = event.pkt[:size]
# pass the payload to dnslib for parsing
dnspkt = dnslib.DNSRecord.parse(payload)
    # let's only look at responses
if dnspkt.header.qr != 1:
return
# must be some questions in there
if dnspkt.header.q != 1:
return
# make sure there are answers
if dnspkt.header.a == 0 and dnspkt.header.aa == 0:
return
# lop off the trailing .
question = ("%s" % dnspkt.q.qname)[:-1].encode('utf-8')
for answer in dnspkt.rr:
# skip all but A and AAAA records
if answer.rtype == 1 or answer.rtype == 28:
dns_cache[str(answer.rdata).encode('utf-8')] = (question,
datetime.now())
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect_entry")
b.attach_kprobe(event="tcp_v6_connect", fn_name="trace_connect_entry")
b.attach_kretprobe(event="tcp_v4_connect", fn_name="trace_connect_v4_return")
b.attach_kretprobe(event="tcp_v6_connect", fn_name="trace_connect_v6_return")
if args.dns:
b.attach_kprobe(event="udp_recvmsg", fn_name="trace_udp_recvmsg")
b.attach_kretprobe(event="udp_recvmsg", fn_name="trace_udp_ret_recvmsg")
print("Tracing connect ... Hit Ctrl-C to end")
if args.count:
try:
while True:
sleep(99999999)
except KeyboardInterrupt:
pass
# header
print("\n%-25s %-25s %-20s %-10s" % (
"LADDR", "RADDR", "RPORT", "CONNECTS"))
depict_cnt(b["ipv4_count"])
depict_cnt(b["ipv6_count"], l3prot='ipv6')
# read events
else:
# header
if args.timestamp:
print("%-9s" % ("TIME(s)"), end="")
if args.print_uid:
print("%-6s" % ("UID"), end="")
if args.lport:
print("%-6s %-12s %-2s %-16s %-6s %-16s %-6s" % ("PID", "COMM", "IP", "SADDR",
"LPORT", "DADDR", "DPORT"), end="")
else:
print("%-6s %-12s %-2s %-16s %-16s %-6s" % ("PID", "COMM", "IP", "SADDR",
"DADDR", "DPORT"), end="")
if args.dns:
print(" QUERY")
else:
print()
start_ts = 0
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
if args.dns:
b["dns_events"].open_perf_buffer(save_dns)
while True:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
brendangregg/bcc
|
tools/tcpconnect.py
|
Python
|
apache-2.0
| 17,972
|
"""
Interact with the world:
- swing the arm, sneak, sprint, jump with a horse, leave the bed
- look around
- dig/place/use blocks
- use the held (active) item
- use/attack entities
- steer vehicles
- edit and sign books
By default, the client sends swing and look packets like the vanilla client.
This can be disabled by setting the ``auto_swing`` and ``auto_look`` flags.
"""
from spockbot.mcdata import constants
from spockbot.mcp import nbt
from spockbot.mcp.proto import MC_SLOT
from spockbot.plugins.base import PluginBase, pl_announce
from spockbot.vector import Vector3
@pl_announce('Interact')
class InteractPlugin(PluginBase):
requires = ('ClientInfo', 'Inventory', 'Net', 'Channels')
def __init__(self, ploader, settings):
super(InteractPlugin, self).__init__(ploader, settings)
ploader.provides('Interact', self)
self.sneaking = False
self.sprinting = False
self.dig_pos_dict = {'x': 0, 'y': 0, 'z': 0}
self.auto_swing = True # move arm when clicking
self.auto_look = True # look at clicked things
def swing_arm(self):
self.net.push_packet('PLAY>Animation', {})
def _entity_action(self, action, jump_boost=100):
entity_id = self.clientinfo.eid
self.net.push_packet('PLAY>Entity Action', {
'eid': entity_id,
'action': action,
'jump_boost': jump_boost,
})
def leave_bed(self):
self._entity_action(constants.ENTITY_ACTION_LEAVE_BED)
def sneak(self, sneak=True):
self._entity_action(constants.ENTITY_ACTION_SNEAK
if sneak else constants.ENTITY_ACTION_UNSNEAK)
self.sneaking = sneak
def unsneak(self):
self.sneak(False)
def sprint(self, sprint=True):
self._entity_action(constants.ENTITY_ACTION_START_SPRINT if sprint
else constants.ENTITY_ACTION_STOP_SPRINT)
self.sprinting = sprint
def unsprint(self):
self.sprint(False)
def jump_horse(self, jump_boost=100):
self._entity_action(constants.ENTITY_ACTION_JUMP_HORSE, jump_boost)
def open_inventory(self):
self._entity_action(constants.ENTITY_ACTION_OPEN_INVENTORY)
def look(self, yaw=0.0, pitch=0.0):
"""
Turn the head. Both angles are in degrees.
"""
self.clientinfo.position.pitch = pitch
self.clientinfo.position.yaw = yaw
def look_rel(self, d_yaw=0.0, d_pitch=0.0):
self.look(self.clientinfo.position.yaw + d_yaw,
self.clientinfo.position.pitch + d_pitch)
def look_at_rel(self, delta):
self.look(*delta.yaw_pitch)
def look_at(self, pos):
delta = pos - self.clientinfo.position
delta.y -= constants.PLAYER_HEIGHT
if delta.x or delta.z:
self.look_at_rel(delta)
else:
self.look(self.clientinfo.position.yaw, delta.yaw_pitch.pitch)
def _send_dig_block(self, status, pos=None, face=constants.FACE_Y_POS):
if status == constants.DIG_START:
self.dig_pos_dict = pos.get_dict().copy()
self.net.push_packet('PLAY>Player Digging', {
'status': status,
'location': self.dig_pos_dict,
'face': face,
})
def start_digging(self, pos):
if self.auto_look:
self.look_at(pos) # TODO look at block center
self._send_dig_block(constants.DIG_START, pos)
if self.auto_swing:
self.swing_arm()
# TODO send swing animation until done or stopped
def cancel_digging(self):
self._send_dig_block(constants.DIG_CANCEL)
def finish_digging(self):
self._send_dig_block(constants.DIG_FINISH)
def dig_block(self, pos):
"""
Not cancelable.
"""
self.start_digging(pos)
self.finish_digging()
def _send_click_block(self, pos, face=1, cursor_pos=Vector3(8, 8, 8)):
self.net.push_packet('PLAY>Player Block Placement', {
'location': pos.get_dict(),
'direction': face,
'held_item': self.inventory.active_slot.get_dict(),
'cur_pos_x': int(cursor_pos.x),
'cur_pos_y': int(cursor_pos.y),
'cur_pos_z': int(cursor_pos.z),
})
def click_block(self, pos, face=1, cursor_pos=Vector3(8, 8, 8),
look_at_block=True, swing=True):
"""
Click on a block.
Examples: push button, open window, make redstone ore glow
Args:
face (int): side of the block on which the block is placed on
cursor_pos (Vector3): where to click inside the block,
each dimension 0-15
"""
if look_at_block and self.auto_look:
# TODO look at cursor_pos
self.look_at(pos)
self._send_click_block(pos, face, cursor_pos)
if swing and self.auto_swing:
self.swing_arm()
def place_block(self, pos, face=1, cursor_pos=Vector3(8, 8, 8),
sneak=True, look_at_block=True, swing=True):
"""
Place a block next to ``pos``.
If the block at ``pos`` is air, place at ``pos``.
"""
sneaking_before = self.sneaking
if sneak:
self.sneak()
self.click_block(pos, face, cursor_pos, look_at_block, swing)
if sneak:
self.sneak(sneaking_before)
def use_bucket(self, pos): # TODO
"""
Using buckets is different from placing blocks.
See "Special note on using buckets"
in http://wiki.vg/Protocol#Player_Block_Placement
"""
raise NotImplementedError(self.use_bucket.__doc__)
def activate_item(self):
"""
Use (hold right-click) the item in the active slot.
Examples: pull the bow, start eating once, throw an egg.
"""
self._send_click_block(pos=Vector3(-1, 255, -1),
face=-1,
cursor_pos=Vector3(-1, -1, -1))
def deactivate_item(self):
"""
Stop using (release right-click) the item in the active slot.
Examples: shoot the bow, stop eating.
"""
self._send_dig_block(constants.DIG_DEACTIVATE_ITEM)
def use_entity(self, entity, cursor_pos=None,
action=constants.INTERACT_ENTITY):
"""
Uses (right-click) an entity to open its window.
Setting ``cursor_pos`` sets ``action`` to "interact at".
"""
if self.auto_look:
self.look_at(Vector3(entity)) # TODO look at cursor_pos
if cursor_pos is not None:
action = constants.INTERACT_ENTITY_AT
packet = {'target': entity.eid, 'action': action}
if action == constants.INTERACT_ENTITY_AT:
packet['target_x'] = cursor_pos.x
packet['target_y'] = cursor_pos.y
packet['target_z'] = cursor_pos.z
self.net.push_packet('PLAY>Use Entity', packet)
if self.auto_swing:
self.swing_arm()
def attack_entity(self, entity):
self.use_entity(entity, action=constants.ATTACK_ENTITY)
def mount_vehicle(self, entity):
self.use_entity(entity)
def steer_vehicle(self, sideways=0.0, forward=0.0,
jump=False, unmount=False):
flags = 0
if jump:
flags += 1
if unmount:
flags += 2
self.net.push_packet('PLAY>Steer Vehicle', {
'sideways': sideways,
'forward': forward,
'flags': flags,
})
def unmount_vehicle(self):
self.steer_vehicle(unmount=True)
def jump_vehicle(self):
self.steer_vehicle(jump=True)
def write_book(self, text, author="", title="", sign=False):
"""Write text to the current book in hand, optionally sign the book"""
book = self._setup_book()
if book is None:
return False
pages = (text[0+i:constants.BOOK_CHARS_PER_PAGE+i]
for i in range(0, len(text), constants.BOOK_CHARS_PER_PAGE))
self.edit_book(pages)
if sign:
self.sign_book(author, title)
def edit_book(self, pages):
"""Set the pages of current book in hand"""
book = self._setup_book()
if book is None:
return False
nbtpages = nbt.TagList(nbt.TagString)
for i, page in enumerate(pages):
if i >= constants.BOOK_MAXPAGES:
break
nbtpages.insert(i, nbt.TagString(page))
book.nbt["pages"] = nbtpages
self.channels.send("MC|BEdit", self._pack_book(book))
def sign_book(self, author, title):
"""Sign current book in hand"""
book = self._setup_book()
if book is None:
return False
book.nbt["author"] = nbt.TagString(author)
book.nbt["title"] = nbt.TagString(title)
# TODO: don't use hard coded id
book.item_id = 387 # written book
self.channels.send("MC|BSign", self._pack_book(book))
def _setup_book(self):
book = self.inventory.active_slot
        # TODO: don't use hard coded ID
if book.item_id != 386: # book and quill
return None
if book.nbt is None:
book.nbt = nbt.TagCompound()
return book
def _pack_book(self, book):
return self.channels.encode(((MC_SLOT, "slot"),),
{"slot": book.get_dict()})
|
luken/SpockBot
|
spockbot/plugins/helpers/interact.py
|
Python
|
mit
| 9,523
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^get_cookie$', 'socialplus.views.get_cookie'),
# OTHER STUFF..
url(r'^test/(?P<test_name>[^/]+)$', 'socialplus.tests.run_test'),
url(r'^delete_all/reports$', 'socialplus.views.delete_reports'),
url(r'^delete_all/people$', 'socialplus.views.delete_people'),
url(r'^delete_all/users$', 'socialplus.views.delete_users'),
url(r'^delete_all/activities$', 'socialplus.views.delete_activities'),
url(r'^delete_all/tasks$', 'socialplus.views.delete_tasks'),
url(r'^delete_all/activities_search_index$', 'socialplus.views.delete_activities_search_index'),
url(r'^reset_domain$', 'socialplus.views.reset_domain'),
url(r'^start_task/(?P<id_>[^/]+)$', 'socialplus.views.start_task'),
# ACTIVITY
url(r'^activities', 'socialplus.views.search_activities'),
# PERSON
url(r'^people/(?P<id_>[^/]+)$', 'socialplus.views.get_person'),
url(r'^people$', 'socialplus.views.get_people'),
# COMMUNITY
url(r'^communities$', 'socialplus.views.get_communities'),
    # PROVIDERS
url(r'^providers$', 'socialplus.views.get_providers'),
# TASKS
url(r'^tasks$', 'socialplus.views.get_post_tasks'),
url(r'^tasks/completed$', 'socialplus.views.get_delete_tasks_completed'),
url(r'^tasks/active$', 'socialplus.views.get_tasks_active'),
url(r'^tasks/(?P<id_>[^/]+)$', 'socialplus.views.get_task'),
# REPORTS
url(r'^reports/(?P<reportId>[^/]+)$', 'socialplus.views.get_delete_report'),
url(r'^reports$', 'socialplus.views.get_post_reports'),
# OLD STUFF
url(r'^tag/create$', 'socialplus.views.create_tag'),
url(r'^tag/update/(?P<tagId>[^/]+)$', 'socialplus.views.update_tag'),
url(r'^tag/delete/(?P<tagId>[^/]+)$', 'socialplus.views.delete_tag'),
url(r'^tag/experts/(?P<tagId>[^/]+)$', 'socialplus.views.get_experts'),
url(r'^tags$', 'socialplus.views.get_tags'),
url(r'^autocircle/create$', 'socialplus.circles.create_autocircle'),
url(r'^autocircle/update/(?P<autocircleId>[^/]+)$', 'socialplus.circles.update_autocircle'),
url(r'^autocircle/delete/(?P<autocircleId>[^/]+)$', 'socialplus.circles.delete_autocircle'),
url(r'^autocircles$', 'socialplus.circles.get_autocircles'),
url(r'^autocircles/sync_all$', 'socialplus.circles.sync_all_circles'),
url(r'^task/sync/(?P<taskName>[^/]+)$', 'socialplus.views.start_sync'),
url(r'^task/progress/(?P<taskId>[^/]+)$', 'socialplus.views.get_task_progress'),
url(r'^tasks/delete_completed$', 'socialplus.views.delete_completed_tasks'),
url(r'^sync/(?P<taskId>[^/]+)$', 'socialplus.views.sync_task'),
)
|
lucacioria/socialplus-prototype
|
DJANGO_GAE/socialplus/urls.py
|
Python
|
unlicense
| 2,685
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ResourcePoolReconfiguredEvent(vim, *args, **kwargs):
'''This event records when a resource pool configuration is changed.'''
obj = vim.client.factory.create('ns0:ResourcePoolReconfiguredEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % len(args))
required = [ 'resourcePool', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
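# Usage sketch (hedged: ``vim`` must be a connected pyvisdk service instance and the
# placeholder values below are illustrative; positional args fill the 'required'
# names above, in order):
#     evt = ResourcePoolReconfiguredEvent(vim, resource_pool, chain_id,
#                                         created_time, key, user_name)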
|
xuru/pyvisdk
|
pyvisdk/do/resource_pool_reconfigured_event.py
|
Python
|
mit
| 1,197
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Candidate.picture'
db.add_column('political_candidate', 'picture',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Candidate.picture'
db.delete_column('political_candidate', 'picture')
models = {
'geo.municipality': {
'Meta': {'object_name': 'Municipality'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'political.campaignbudget': {
'Meta': {'unique_together': "(('candidate', 'advance'),)", 'object_name': 'CampaignBudget'},
'advance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'candidate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Candidate']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_submitted': ('django.db.models.fields.DateTimeField', [], {})
},
'political.campaignexpense': {
'Meta': {'unique_together': "(('budget', 'type'),)", 'object_name': 'CampaignExpense'},
'budget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.CampaignBudget']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '15', 'decimal_places': '2'}),
'time_submitted': ('django.db.models.fields.DateTimeField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.CampaignExpenseType']"})
},
'political.campaignexpensetype': {
'Meta': {'object_name': 'CampaignExpenseType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'})
},
'political.candidate': {
'Meta': {'unique_together': "(('person', 'municipality', 'election'), ('number', 'municipality', 'election'))", 'object_name': 'Candidate'},
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Election']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Municipality']", 'null': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Party']", 'null': 'True'}),
'party_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Person']"}),
'picture': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'profession': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'political.candidatefeed': {
'Meta': {'object_name': 'CandidateFeed', '_ormbases': ['social.Feed']},
'candidate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Candidate']"}),
'feed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['social.Feed']", 'unique': 'True', 'primary_key': 'True'})
},
'political.election': {
'Meta': {'object_name': 'Election'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'round': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'political.municipalitycommittee': {
'Meta': {'object_name': 'MunicipalityCommittee'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Municipality']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'political.municipalitytrustee': {
'Meta': {'object_name': 'MunicipalityTrustee'},
'begin': ('django.db.models.fields.DateField', [], {}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.MunicipalityCommittee']"}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Election']"}),
'end': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'political.party': {
'Meta': {'object_name': 'Party'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'political.partyname': {
'Meta': {'object_name': 'PartyName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Party']"})
},
'political.person': {
'Meta': {'unique_together': "(('first_name', 'last_name', 'municipality', 'index'),)", 'object_name': 'Person'},
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Municipality']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['political.Party']", 'null': 'True'})
},
'political.votingdistrict': {
'Meta': {'unique_together': "(('municipality', 'origin_id'),)", 'object_name': 'VotingDistrict'},
'borders': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True'}),
'elections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['political.Election']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['geo.Municipality']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'origin_id': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'social.feed': {
'Meta': {'unique_together': "(('type', 'origin_id'),)", 'object_name': 'Feed'},
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interest': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'origin_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'picture': ('django.db.models.fields.URLField', [], {'max_length': '250', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_error_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
}
}
complete_apps = ['political']
|
kansanmuisti/datavaalit
|
web/political/migrations/0005_auto__add_field_candidate_picture.py
|
Python
|
agpl-3.0
| 9,140
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import shutil
from io import open
import click
import yaml
from shellfoundry.utilities.archive_creator import ArchiveCreator
from shellfoundry.utilities.shell_package import ShellPackage
from shellfoundry.utilities.temp_dir_context import TempDirContext
class ShellPackageBuilder(object):
DRIVER_DIR = "src"
DEPLOY_DIR = "deployments"
def pack(self, path):
"""Creates TOSCA based Shell package."""
self._remove_all_pyc(path)
shell_package = ShellPackage(path)
shell_name = shell_package.get_shell_name()
shell_real_name = shell_package.get_name_from_definition()
with TempDirContext(shell_name) as package_path:
self._copy_tosca_meta(package_path, "")
tosca_meta = self._read_tosca_meta(path)
shell_definition_path = tosca_meta["Entry-Definitions"]
self._copy_shell_definition(package_path, "", shell_definition_path)
with open(shell_definition_path, encoding="utf8") as shell_definition_file:
shell_definition = yaml.safe_load(shell_definition_file)
if "template_icon" in shell_definition["metadata"]:
self._copy_artifact(
shell_definition["metadata"]["template_icon"], package_path
)
for node_type in list(shell_definition["node_types"].values()):
if "artifacts" not in node_type:
continue
artifact_path_list = []
for artifact_name, artifact in node_type["artifacts"].items():
if artifact_name == "driver":
artifact_path_list.append(
self._create_driver(
path="",
package_path=os.curdir,
dir_path=self.DRIVER_DIR,
driver_name=os.path.basename(artifact["file"]),
)
)
elif artifact_name == "deployment":
artifact_path_list.append(
self._create_driver(
path="",
package_path=os.curdir,
dir_path=self.DEPLOY_DIR,
driver_name=os.path.basename(artifact["file"]),
mandatory=False,
)
)
self._copy_artifact(artifact["file"], package_path)
zip_path = self._zip_package(package_path, "", shell_real_name)
try:
self._remove_build_artifacts(artifact_path_list)
except Exception:
pass
click.echo("Shell package was successfully created: " + zip_path)
def _copy_artifact(self, artifact_path, package_path):
if os.path.exists(artifact_path):
click.echo("Adding artifact to shell package: " + artifact_path)
self._copy_file(src_file_path=artifact_path, dest_dir_path=package_path)
else:
click.echo("Missing artifact not added to shell package: " + artifact_path)
def _read_tosca_meta(self, path):
tosca_meta = {}
shell_package = ShellPackage(path)
with open(shell_package.get_metadata_path(), encoding="utf8") as meta_file:
for meta_line in meta_file:
(key, val) = meta_line.split(":")
tosca_meta[key] = val.strip()
return tosca_meta
def _copy_shell_icon(self, package_path, path):
self._copy_file(
src_file_path=os.path.join(path, "shell-icon.png"),
dest_dir_path=package_path,
)
def _copy_shell_definition(self, package_path, path, shell_definition):
self._copy_file(
src_file_path=os.path.join(path, shell_definition),
dest_dir_path=package_path,
)
def _copy_tosca_meta(self, package_path, path):
shell_package = ShellPackage(path)
self._copy_file(
src_file_path=shell_package.get_metadata_path(),
dest_dir_path=os.path.join(package_path, "TOSCA-Metadata"),
)
@staticmethod
def _remove_all_pyc(package_path):
for root, dirs, files in os.walk(package_path):
for file in files:
if file.endswith(".pyc"):
os.remove(os.path.join(root, file))
@staticmethod
def _create_driver(path, package_path, dir_path, driver_name, mandatory=True):
dir_to_zip = os.path.join(path, dir_path)
if os.path.exists(dir_to_zip):
zip_file_path = os.path.join(package_path, driver_name)
ArchiveCreator.make_archive(zip_file_path, "zip", dir_to_zip)
return os.path.abspath(zip_file_path)
elif mandatory:
raise click.ClickException(
"Invalid driver structure. Can't find '{}' driver folder.".format(
dir_path
)
)
@staticmethod
def _copy_file(src_file_path, dest_dir_path):
if not os.path.exists(dest_dir_path):
os.makedirs(dest_dir_path)
shutil.copy(src_file_path, dest_dir_path)
@staticmethod
def _zip_package(package_path, path, package_name):
zip_file_path = os.path.join(path, "dist", package_name)
return ArchiveCreator.make_archive(zip_file_path, "zip", package_path)
@staticmethod
def _remove_build_artifacts(artifacts_path_list):
for artifact_path in artifacts_path_list:
if artifact_path and os.path.exists(artifact_path):
os.remove(artifact_path)
|
QualiSystems/shellfoundry
|
shellfoundry/utilities/shell_package_builder.py
|
Python
|
apache-2.0
| 5,913
|
#! -*- coding:utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from kullanici.models import IsVeren
class Adres(models.Model):
kullanici = models.ForeignKey(IsVeren,related_name="adres")
adres_basligi = models.CharField(max_length=50)
adres = models.CharField(max_length=80)
sehir = models.CharField(max_length=40)
ilce = models.CharField(max_length=40,blank=True, null=True)
semt = models.CharField(max_length=40,blank=True, null=True)
mahalle = models.CharField(max_length=40,blank=True, null=True)
sokak = models.CharField(max_length=40,blank=True, null=True)
no = models.CharField(max_length=10,blank=True, null=True)
posta_kodu = models.CharField(max_length=40,blank=True, null=True)
def __str__(self):
return self.kullanici.kullanici.username + " " + self.adres_basligi
class Meta:
verbose_name ="İş Veren Adresi"
verbose_name_plural="İş Veren Adresleri"
def adresSec(adresID):
return Adres.objects.get(pk=adresID)
|
ufukdogan92/is-teklif-sistemi
|
adres/models.py
|
Python
|
gpl-3.0
| 1,053
|
#!/usr/bin/env python
# vim: fdm=indent
'''
author: Fabio Zanini
date: 07/08/17
content: Test Dataset class.
'''
import numpy as np
import pytest
@pytest.fixture(scope="module")
def ds():
from singlet.dataset import Dataset
return Dataset(samplesheet='example_sheet_tsv', counts_table='example_table_tsv')
def test_features_phenotypes(ds):
r = ds.correlation.correlate_features_phenotypes(
phenotypes=['quantitative_phenotype_1_[A.U.]'],
features=['TSPAN6', 'DPM1'])
assert(np.isclose(r.values[0, 0], -0.8, rtol=1e-1, atol=1e-1))
def test_features_phenotype(ds):
r = ds.correlation.correlate_features_phenotypes(
phenotypes='quantitative_phenotype_1_[A.U.]',
features=['TSPAN6', 'DPM1'])
assert(np.isclose(r.values[0], -0.8, rtol=1e-1, atol=1e-1))
def test_feature_phenotypes(ds):
r = ds.correlation.correlate_features_phenotypes(
phenotypes=['quantitative_phenotype_1_[A.U.]'],
features='TSPAN6')
assert(np.isclose(r.values[0], -0.8, rtol=1e-1, atol=1e-1))
def test_feature_phenotype(ds):
r = ds.correlation.correlate_features_phenotypes(
phenotypes='quantitative_phenotype_1_[A.U.]',
features='TSPAN6')
assert(np.isclose(r, -0.8, rtol=1e-1, atol=1e-1))
def test_features_phenotypes_pearson(ds):
r = ds.correlation.correlate_features_phenotypes(
phenotypes=['quantitative_phenotype_1_[A.U.]'],
features=['TSPAN6', 'DPM1'],
method='pearson',
fillna=0)
assert(np.isclose(r.values[1, 0], -0.6, rtol=1e-1, atol=1e-1))
def test_features_phenotypes_fillna(ds):
r = ds.correlation.correlate_features_phenotypes(
phenotypes='quantitative_phenotype_1_[A.U.]',
features=['TSPAN6', 'DPM1'],
method='pearson',
fillna={'quantitative_phenotype_1_[A.U.]': 0})
assert(np.isclose(r.values[1], -0.6, rtol=1e-1, atol=1e-1))
def test_features_phenotypes_pearson_all(ds):
ds2 = ds.query_features_by_name(['TSPAN6', 'DPM1'])
r = ds2.correlation.correlate_features_phenotypes(
phenotypes=['quantitative_phenotype_1_[A.U.]'],
features='all',
method='pearson',
fillna=0)
assert(np.isclose(r.values[1, 0], -0.6, rtol=1e-1, atol=1e-1))
def test_features_phenotypes_pearson_fillna(ds):
r = ds.correlation.correlate_phenotypes_phenotypes(
phenotypes='quantitative_phenotype_1_[A.U.]',
phenotypes2='quantitative_phenotype_1_[A.U.]',
method='pearson',
fillna={'quantitative_phenotype_1_[A.U.]': 0},
fillna2={'quantitative_phenotype_1_[A.U.]': 0},
)
assert(np.isclose(r, 1, rtol=1e-1, atol=1e-1))
def test_features_features(ds):
r = ds.correlation.correlate_features_features(
features=['TSPAN6', 'DPM1'],
features2=['TSPAN6'],
method='pearson')
assert(np.isclose(r.values[0, 0], 1, rtol=1e-1, atol=1e-1))
def test_features_features_all(ds):
ds2 = ds.query_features_by_name(['TSPAN6', 'DPM1'])
r = ds2.correlation.correlate_features_features(
features='all',
features2='TSPAN6',
method='pearson')
assert(np.isclose(r.values[0], 1, rtol=1e-1, atol=1e-1))
def test_samples(ds):
n = ds.n_samples
r = ds.correlation.correlate_samples()
assert(np.allclose(r.values[np.arange(n), np.arange(n)], 1))
def test_samples_2(ds):
n = ds.n_samples
sns = ds.samplenames
r = ds.correlation.correlate_samples(
samples=sns,
samples2=sns,
)
assert(np.allclose(r.values[np.arange(n), np.arange(n)], 1))
def test_samples_3(ds):
n = ds.n_samples
sns = ds.samplenames
r = ds.correlation.correlate_samples(
samples=sns,
samples2='all',
)
assert(np.allclose(r.values[np.arange(n), np.arange(n)], 1))
def test_samples_withpheno(ds):
sns = ds.samplenames
print(ds.samplesheet.columns)
r = ds.correlation.correlate_samples(
samples=sns[0],
samples2=sns[0],
phenotypes=['quantitative_phenotype_1_[A.U.]'],
)
assert(np.isclose(r, 1))
|
iosonofabio/singlet
|
test/dataset/test_correlation.py
|
Python
|
mit
| 4,276
|
from functools import reduce
from operator import mul
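# Project Euler 20: reduce(mul, range(1, 101)) computes 100!, then the decimal digits
# of that product are summed.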
print(sum(int(c) for c in str(reduce(mul, range(1,101)))))
|
jokkebk/livecoding
|
Euler16-20/p20.py
|
Python
|
mit
| 114
|
"""Support for Sensirion SHT31 temperature and humidity sensor."""
from datetime import timedelta
import logging
import math
from Adafruit_SHT31 import SHT31
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_NAME,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_I2C_ADDRESS = "i2c_address"
DEFAULT_NAME = "SHT31"
DEFAULT_I2C_ADDRESS = 0x44
SENSOR_TEMPERATURE = "temperature"
SENSOR_HUMIDITY = "humidity"
SENSOR_TYPES = (SENSOR_TEMPERATURE, SENSOR_HUMIDITY)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.All(
vol.Coerce(int), vol.Range(min=0x44, max=0x45)
),
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
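# Example configuration sketch (hedged: standard sensor-platform YAML, with keys taken
# from the schema above; the address and condition values are illustrative):
#
#   sensor:
#     - platform: sht31
#       i2c_address: 0x44
#       monitored_conditions:
#         - temperature
#         - humidity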
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
i2c_address = config.get(CONF_I2C_ADDRESS)
sensor = SHT31(address=i2c_address)
try:
if sensor.read_status() is None:
raise ValueError("CRC error while reading SHT31 status")
except (OSError, ValueError):
_LOGGER.error("SHT31 sensor not detected at address %s", hex(i2c_address))
return
sensor_client = SHTClient(sensor)
sensor_classes = {
SENSOR_TEMPERATURE: SHTSensorTemperature,
SENSOR_HUMIDITY: SHTSensorHumidity,
}
devs = []
for sensor_type, sensor_class in sensor_classes.items():
name = f"{config.get(CONF_NAME)} {sensor_type.capitalize()}"
devs.append(sensor_class(sensor_client, name))
add_entities(devs)
class SHTClient:
"""Get the latest data from the SHT sensor."""
def __init__(self, adafruit_sht):
"""Initialize the sensor."""
self.adafruit_sht = adafruit_sht
self.temperature = None
self.humidity = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the SHT sensor."""
temperature, humidity = self.adafruit_sht.read_temperature_humidity()
if math.isnan(temperature) or math.isnan(humidity):
_LOGGER.warning("Bad sample from sensor SHT31")
return
self.temperature = temperature
self.humidity = humidity
class SHTSensor(SensorEntity):
"""An abstract SHTSensor, can be either temperature or humidity."""
def __init__(self, sensor, name):
"""Initialize the sensor."""
self._sensor = sensor
self._name = name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Fetch temperature and humidity from the sensor."""
self._sensor.update()
class SHTSensorTemperature(SHTSensor):
"""Representation of a temperature sensor."""
_attr_device_class = DEVICE_CLASS_TEMPERATURE
_attr_native_unit_of_measurement = TEMP_CELSIUS
def update(self):
"""Fetch temperature from the sensor."""
super().update()
self._state = self._sensor.temperature
class SHTSensorHumidity(SHTSensor):
"""Representation of a humidity sensor."""
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return PERCENTAGE
def update(self):
"""Fetch humidity from the sensor."""
super().update()
humidity = self._sensor.humidity
if humidity is not None:
self._state = round(humidity)
|
sander76/home-assistant
|
homeassistant/components/sht31/sensor.py
|
Python
|
apache-2.0
| 4,063
|
from setuptools import setup, find_packages
setup(
name="txtalert",
version="0.1",
url='http://github.com/praekelt/txtalert',
license='GPL',
description=("txtAlert sends automated, personalized SMS reminders "
"to patients on chronic medication."),
long_description = open('README.rst', 'r').read(),
author = 'Praekelt Foundation',
author_email = "dev@praekeltfoundation.org",
packages = find_packages(),
install_requires=[
'Django>=1.6,<1.7',
'django-nose',
'gdata==2.0.18',
'xlrd==0.7.1',
'django-dirtyfields==0.1',
'django-historicalrecords==1.1',
'iso8601',
'south==0.7.3',
'gunicorn==0.12.1',
'supervisor',
'django-geckoboard',
'python-memcached==1.48',
'raven',
'pytz',
'django-markup-deprecated',
'django-autocomplete-light',
'psycopg2',
'redis',
],
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
praekelt/txtalert
|
setup.py
|
Python
|
gpl-3.0
| 1,296
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: iso8859_9.py
""" Python Character Mapping Codec iso8859_9 generated from 'MAPPINGS/ISO8859/8859-9.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='iso8859-9', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0¡¢£¤¥¦§¨©ª«¬\xad®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏĞÑÒÓÔÕÖרÙÚÛÜİŞßàáâãäåæçèéêëìíîïğñòóôõö÷øùúûüışÿ'
encoding_table = codecs.charmap_build(decoding_table)
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/encodings/iso8859_9.py
|
Python
|
unlicense
| 1,902
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
view = UnwrapElement(IN[0])
eyeposition = IN[1].ToXyz()
updirection = IN[2].ToXyz()
forwarddirection = IN[3].ToXyz()
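# Build the new 3D orientation from the eye/up/forward vectors and apply it to the
# view inside a transaction; OUT reports True/False so the graph can detect failures.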
TransactionManager.Instance.EnsureInTransaction(doc)
try:
newVO = ViewOrientation3D(eyeposition, updirection, forwarddirection)
view.SetOrientation(newVO)
view.SaveOrientation()
OUT = True
except:
OUT = False
TransactionManager.Instance.TransactionTaskDone()
|
andydandy74/ClockworkForDynamo
|
nodes/2.x/python/PerspectiveView.OrientToEyeAndTargetPosition.py
|
Python
|
mit
| 800
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Common text strings"""
def direction_in():
"""Log line text indicating received by router"""
return "<-"
def direction_out():
"""Log line text indicating transmitted by router"""
return "->"
def lozenge():
"""
:return: HTML document lozenge character
"""
return "◊"
def nbsp():
"""
:return: HTML Non-breaking space
"""
return " "
"""Large text strings used by main that change infrequently"""
# html head, start body
def web_page_head():
return """<!DOCTYPE html>
<html>
<head>
<title>Adverbl Analysis - qpid-dispatch router logs</title>
<style>
* {
font-family: sans-serif;
}
table {
border-collapse: collapse;
}
table, td, th {
border: 1px solid black;
padding: 3px;
}
</style>
<script src="http://ajax.googleapis.com/ajax/libs/dojo/1.4/dojo/dojo.xd.js" type="text/javascript"></script>
<!-- <script src="http://ajax.googleapis.com/ajax/libs/dojo/1.4/dojo/dojo.xd.js" type="text/javascript"></script> -->
<script type="text/javascript">
function node_is_visible(node)
{
if(dojo.isString(node))
node = dojo.byId(node);
if(!node)
return false;
return node.style.display == "block";
}
function set_node(node, str)
{
if(dojo.isString(node))
node = dojo.byId(node);
if(!node) return;
node.style.display = str;
}
function toggle_node(node)
{
if(dojo.isString(node))
node = dojo.byId(node);
if(!node) return;
set_node(node, (node_is_visible(node)) ? 'none' : 'block');
}
function hide_node(node)
{
set_node(node, 'none');
}
function show_node(node)
{
set_node(node, 'block');
}
function go_back()
{
window.history.back();
}
"""
def web_page_toc():
return """
<h3>Contents</h3>
<table>
<tr> <th>Section</th> <th>Description</th> </tr>
<tr><td><a href=\"#c_logfiles\" >Log files</a></td> <td>Router and log file info</td></tr>
<tr><td><a href=\"#c_rtrinstances\" >Router Instances</a></td> <td>Router reboot chronology</td></tr>
<tr><td><a href=\"#c_connections\" >Connections</a></td> <td>Connection overview; per connection log data view control</td></tr>
<tr><td><a href=\"#c_addresses\" >Addresses</a></td> <td>AMQP address usage</td></tr>
<tr><td><a href=\"#c_connectchrono\" >Connection Chronology</a></td> <td>Router restart and connection chronology</td></tr>
<tr><td><a href=\"#c_conndetails\" >Connection Details</a></td> <td>Connection details; frames sorted by link</td></tr>
<tr><td><a href=\"#c_noteworthy\" >Noteworthy log lines</a></td> <td>AMQP errors and interesting flags</td></tr>
<tr><td><a href=\"#c_logdata\" >Log data</a></td> <td>Main AMQP traffic table</td></tr>
<tr><td><a href=\"#c_messageprogress\">Message progress</a></td> <td>Tracking messages through the system</td></tr>
<tr><td><a href=\"#c_linkprogress\" >Link name propagation</a></td> <td>Tracking link names</td></tr>
<tr><td><a href=\"#c_rtrdump\" >Router name index</a></td> <td>Short vs. long router container names</td></tr>
<tr><td><a href=\"#c_peerdump\" >Peer name index</a></td> <td>Short vs. long peer names</td></tr>
<tr><td><a href=\"#c_linkdump\" >Link name index</a></td> <td>Short vs. long link names</td></tr>
<tr><td><a href=\"#c_msgdump\" >Transfer name index</a></td> <td>Short names representing transfer data</td></tr>
<tr><td><a href=\"#c_ls\" >Router link state</a></td> <td>Link state analysis</td></tr>
<tr><td><a href=\"#c_sequence\" >Sequence diagram data</a></td> <td>Input data for seq-diag-gen.py utility</td></tr>
</table>
<hr>
"""
if __name__ == "__main__":
pass
|
ted-ross/qpid-dispatch
|
tools/scraper/text.py
|
Python
|
apache-2.0
| 4,550
|
#!/usr/bin/python
import sys;
from AnnotationLib import *
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser();
parser.add_option('-a', '--annolist', dest='annolist_name', type="string", help='input annotation list (*.al or *.idl)', default=None)
parser.add_option('-o', '--output_dir', dest='output_dir', type="string", help='directory for saving tracking results', default='./')
parser.add_option('-f', '--first', dest='firstidx', type="int", help='first image to start tracking (0-based)', default=0)
parser.add_option('-n', '--numimgs', dest='numimgs', type="int", help='number of images to process in the original image list', default=-1)
parser.add_option('--step', dest='step', type="int", help='step between images used when generating subset of the original image list', default=-1)
parser.add_option('--subset_with_last', action="store_true", dest='do_subset_with_last', help='subset of the image list')
parser.add_option('--subset', action="store_true", dest='do_subset', help='subset of the image list')
parser.add_option('--convert', dest='convert_name', type="string", help='convert/save to different format')
parser.add_option('--min_width', dest='min_width', type="int", help='remove bounding boxes with width smaller than threshold', default=-1)
parser.add_option('--max_width', dest='max_width', type="int", help='remove bounding boxes with width larger than threshold', default=-1)
parser.add_option('--merge_sort', dest='merge_sort', type="string", help='merge two annotation lists and sort the result by filename')
(opts, args) = parser.parse_args()
annolist_basedir = os.path.dirname(opts.annolist_name)
print "loading ", opts.annolist_name;
annolist = parse(opts.annolist_name);
# opts.firstidx = int(opts.firstidx);
# opts.numimgs = int(opts.numimgs);
if opts.do_subset or opts.do_subset_with_last:
if opts.numimgs == -1:
numimgs = len(annolist);
else:
numimgs = opts.numimgs;
firstidx = opts.firstidx;
lastidx = opts.firstidx + numimgs - 1;
print "processing images " + str(firstidx) + " to " + str(lastidx)
if opts.step != -1:
annolist_out = annolist[firstidx:lastidx+1:opts.step];
# always include the last element when generating a subset
if opts.do_subset_with_last:
if annolist_out[-1].imageName != annolist[-1].imageName:
annolist_out.append(annolist[-1]);
else:
            annolist_out = annolist[firstidx:lastidx+1];
annolist_path, annolist_base_ext = os.path.split(opts.annolist_name);
annolist_base, annolist_ext = os.path.splitext(annolist_base_ext);
save_filename = opts.output_dir + "/" + annolist_base + "-subset";
if firstidx != 0 or lastidx != len(annolist) - 1:
save_filename += "-firstidx" + str(firstidx) + "-lastidx" + str(lastidx)
if opts.step != -1:
if opts.do_subset_with_last:
save_filename += "-steplast" + str(opts.step);
else:
save_filename += "-step" + str(opts.step);
save_filename += annolist_ext;
print "saving " + save_filename;
save(save_filename, annolist_out);
elif opts.merge_sort:
merge_filename = opts.merge_sort;
print "loading ", merge_filename;
annolist_merge = parse(merge_filename);
annolist = annolist + annolist_merge;
annolist.sort(key=lambda a: (a.imageName));
save_filename = opts.output_dir;
fname_ext = os.path.splitext(save_filename)[1];
print "format: ",fname_ext;
if fname_ext == ".al" or fname_ext == ".pal":
assert(save_filename != opts.annolist_name and save_filename != merge_filename)
print "saving ", save_filename;
save(save_filename, annolist);
else:
print "unrecognized output format";
elif opts.min_width > 0 or opts.max_width > 0:
min_width = opts.min_width;
max_width = opts.max_width;
if opts.max_width <= 0:
max_width = 1e6;
for a in annolist:
a.rects = [r for r in a.rects if r.width() > min_width and r.width() < max_width];
annolist_path, annolist_base_ext = os.path.split(opts.annolist_name);
annolist_base, annolist_ext = os.path.splitext(annolist_base_ext);
save_filename = annolist_path + "/" + annolist_base;
if opts.min_width != -1:
save_filename += "-minwidth" + str(opts.min_width)
if opts.max_width != -1:
save_filename += "-maxwidth" + str(opts.max_width)
save_filename += annolist_ext;
print "saving " + save_filename;
save(save_filename, annolist);
elif opts.convert_name != None:
print "saving ", opts.convert_name;
save(opts.convert_name, annolist);
|
sameeptandon/sail-car-log
|
car_tracking/annoProc.py
|
Python
|
bsd-2-clause
| 5,054
|
'''
Created on Mar 1, 2016
@author: PJ
'''
from BaseScouting.api_scraper.fake_data_scripts.TempRetrofilSrFromOfficialResults import TempRetrofillSrFromOfficialResults
from BaseScouting.load_django import load_django
class TempRetrofillSrFromOfficialResults2017(TempRetrofillSrFromOfficialResults):
gear_lookup = {0: 0,
1: 1,
2: 3,
3: 7,
4: 13,
}
def __get_common_fields(self, official_match_sr, round_up=False):
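        # Alliance-level totals are split into integer thirds per robot; round_up=True
        # adds the remainder back (get_team3_stats below passes it so team 3 absorbs
        # any leftovers).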
output = {}
# auto_rotor_count = official_match_sr.rotor1Auto + official_match_sr.rotor2Auto
# tele_rotor_count = official_match_sr.rotor1Engaged + official_match_sr.rotor2Engaged + official_match_sr.rotor3Engaged + official_match_sr.rotor4Engaged
# auto_gears = self.gear_lookup[auto_rotor_count]
# tele_gears = self.gear_lookup[tele_rotor_count]
auto_gears = 0
if official_match_sr.rotor2Auto:
auto_gears = 3
elif official_match_sr.rotor1Auto:
auto_gears = 1
tele_gears = 0
if official_match_sr.rotor4Engaged:
tele_gears = 13
elif official_match_sr.rotor3Engaged:
tele_gears = 7
elif official_match_sr.rotor2Engaged:
tele_gears = 3
elif official_match_sr.rotor1Engaged:
tele_gears = 1
output['auto_fuel_high_score'] = int(official_match_sr.autoFuelHigh / 3.0)
output['auto_fuel_low_score'] = int(official_match_sr.autoFuelLow / 3.0)
output['auto_gears'] = int(auto_gears / 3.0)
output['tele_fuel_high_score'] = int(official_match_sr.teleopFuelHigh / 3.0)
output['tele_fuel_low_score'] = int(official_match_sr.teleopFuelLow / 3.0)
output['tele_gears'] = int(tele_gears / 3.0)
if round_up:
output['auto_fuel_high_score'] += int(official_match_sr.autoFuelHigh % 3.0)
output['auto_fuel_low_score'] += int(official_match_sr.autoFuelLow % 3.0)
output['auto_gears'] += int(auto_gears % 3.0)
output['tele_fuel_high_score'] += int(official_match_sr.teleopFuelHigh % 3.0)
output['tele_fuel_low_score'] += int(official_match_sr.teleopFuelLow % 3.0)
output['tele_gears'] += int(tele_gears % 3.0)
output['auto_fuel_high_shots'] = output['auto_fuel_high_score']
output['auto_fuel_low_shots'] = output['auto_fuel_low_score']
output['tele_fuel_high_shots'] = output['tele_fuel_high_score']
output['tele_fuel_low_shots'] = output['tele_fuel_low_score']
return output
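# Worked example (illustrative, derived from the code above): with an alliance
# autoFuelHigh of 10, get_team1_stats and get_team2_stats each report
# int(10 / 3.0) == 3, while get_team3_stats (round_up=True) reports
# 3 + int(10 % 3.0) == 4, so the three per-robot estimates sum back to the official total.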
def get_team1_stats(self, official_match_sr):
output = self.__get_common_fields(official_match_sr)
output['auto_baseline'] = official_match_sr.robot1Auto == "Mobility"
output['rope'] = official_match_sr.touchpadFar == 1
output['foul'] = official_match_sr.foulCount >= 1
output['tech_foul'] = official_match_sr.techFoulCount >= 1
return output
def get_team2_stats(self, official_match_sr):
output = self.__get_common_fields(official_match_sr)
output['auto_baseline'] = official_match_sr.robot2Auto == "Mobility"
output['rope'] = official_match_sr.touchpadMiddle == 1
output['foul'] = official_match_sr.foulCount >= 2
output['tech_foul'] = official_match_sr.techFoulCount >= 2
return output
def get_team3_stats(self, official_match_sr):
output = self.__get_common_fields(official_match_sr, round_up=True)
output['auto_baseline'] = official_match_sr.robot3Auto == "Mobility"
output['rope'] = official_match_sr.touchpadNear == 1
output['foul'] = official_match_sr.foulCount >= 3
output['tech_foul'] = official_match_sr.techFoulCount >= 3
return output
def retrofill_results(min_match_number, max_match_number):
from Scouting2017.model.reusable_models import OfficialMatch, Match
from Scouting2017.model.models2017 import OfficialMatchScoreResult, ScoreResult
populater = TempRetrofillSrFromOfficialResults2017()
for match_number in range(min_match_number, max_match_number):
official_matches = OfficialMatch.objects.filter(matchNumber=match_number)
if len(official_matches) != 1:
continue
official_match = official_matches[0]
official_srs = OfficialMatchScoreResult.objects.filter(official_match=official_match)
match, _ = Match.objects.get_or_create(matchNumber=official_match.matchNumber)
populater.populate_matchresults(official_match, Match, ScoreResult, OfficialMatchScoreResult)
if __name__ == "__main__":
load_django()
retrofill_results(1, 40)
# retrofill_results(7, 8)
|
ArcticWarriors/scouting-app
|
ScoutingWebsite/Scouting2017/api_scraper/fake_data_scripts/retrofil_matchresults.py
|
Python
|
mit
| 4,792
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RimuHosting Driver
"""
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
from libcloud.compute.base import NodeImage, NodeAuthPassword
API_CONTEXT = '/r'
API_HOST = 'rimuhosting.com'
class RimuHostingException(Exception):
"""
Exception class for RimuHosting driver
"""
def __str__(self):
return self.args[0]
def __repr__(self):
return "<RimuHostingException '%s'>" % (self.args[0])
class RimuHostingResponse(JsonResponse):
def __init__(self, response, connection):
self.body = response.read()
self.status = response.status
self.headers = dict(response.getheaders())
self.error = response.reason
self.connection = connection
if self.success():
self.object = self.parse_body()
def success(self):
if self.status == 403:
raise InvalidCredsError()
return True
def parse_body(self):
try:
js = super(RimuHostingResponse, self).parse_body()
keys = list(js.keys())
if js[keys[0]]['response_type'] == "ERROR":
raise RimuHostingException(
js[keys[0]]['human_readable_message']
)
return js[keys[0]]
except KeyError:
raise RimuHostingException('Could not parse body: %s'
% (self.body))
class RimuHostingConnection(ConnectionKey):
"""
Connection class for the RimuHosting driver
"""
api_context = API_CONTEXT
host = API_HOST
port = 443
responseCls = RimuHostingResponse
def __init__(self, key, secure=True):
# override __init__ so that we can set secure to False for testing
ConnectionKey.__init__(self, key, secure)
def add_default_headers(self, headers):
# We want JSON back from the server. Could be application/xml
# (but JSON is better).
headers['Accept'] = 'application/json'
# Must encode all data as json, or override this header.
headers['Content-Type'] = 'application/json'
headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key)
return headers
def request(self, action, params=None, data='', headers=None,
method='GET'):
if not headers:
headers = {}
if not params:
params = {}
# Override this method to prepend the api_context
return ConnectionKey.request(self, self.api_context + action,
params, data, headers, method)
class RimuHostingNodeDriver(NodeDriver):
"""
RimuHosting node driver
"""
type = Provider.RIMUHOSTING
name = 'RimuHosting'
website = 'http://rimuhosting.com/'
connectionCls = RimuHostingConnection
def __init__(self, key, host=API_HOST, port=443,
api_context=API_CONTEXT, secure=True):
"""
@param key: API key (required)
@type key: C{str}
@param host: hostname for connection
@type host: C{str}
@param port: Override port used for connections.
@type port: C{int}
@param api_context: Optional API context.
@type api_context: C{str}
@param secure: Whether to use HTTPS or HTTP.
@type secure: C{bool}
@rtype: C{None}
"""
# Pass in some extra vars so that the connection is set up with the
# given host, port and API context.
self.key = key
self.secure = secure
self.connection = self.connectionCls(key, secure)
self.connection.host = host
self.connection.api_context = api_context
self.connection.port = port
self.connection.driver = self
self.connection.connect()
def _order_uri(self, node, resource):
# Returns the order uri with its resource appended.
return "/orders/%s/%s" % (node.id, resource)
# TODO: Get the node state.
def _to_node(self, order):
n = Node(id=order['slug'],
name=order['domain_name'],
state=NodeState.RUNNING,
public_ips=(
[order['allocated_ips']['primary_ip']]
+ order['allocated_ips']['secondary_ips']
),
private_ips=[],
driver=self.connection.driver,
extra={
'order_oid': order['order_oid'],
'monthly_recurring_fee': order.get(
'billing_info').get('monthly_recurring_fee')})
return n
def _to_size(self, plan):
return NodeSize(
id=plan['pricing_plan_code'],
name=plan['pricing_plan_description'],
ram=plan['minimum_memory_mb'],
disk=plan['minimum_disk_gb'],
bandwidth=plan['minimum_data_transfer_allowance_gb'],
price=plan['monthly_recurring_amt']['amt_usd'],
driver=self.connection.driver
)
def _to_image(self, image):
return NodeImage(id=image['distro_code'],
name=image['distro_description'],
driver=self.connection.driver)
def list_sizes(self, location=None):
# Returns a list of sizes (aka plans)
# Get plans. Note this is really just for libcloud.
# We are happy with any size.
if location is None:
location = ''
else:
location = ";dc_location=%s" % (location.id)
res = self.connection.request(
'/pricing-plans;server-type=VPS%s' % (location)).object
return list(map(lambda x: self._to_size(x), res['pricing_plan_infos']))
def list_nodes(self):
# Returns a list of Nodes
# Will only include active ones.
res = self.connection.request('/orders;include_inactive=N').object
return list(map(lambda x: self._to_node(x), res['about_orders']))
def list_images(self, location=None):
# Get all base images.
# TODO: add other image sources. (Such as a backup of a VPS)
# All Images are available for use at all locations
res = self.connection.request('/distributions').object
return list(map(lambda x: self._to_image(x), res['distro_infos']))
def reboot_node(self, node):
# Reboot
# PUT the state of RESTARTING to restart a VPS.
# All data is encoded as JSON
data = {'reboot_request': {'running_state': 'RESTARTING'}}
uri = self._order_uri(node, 'vps/running-state')
self.connection.request(uri, data=json.dumps(data), method='PUT')
# XXX check that the response was actually successful
return True
def destroy_node(self, node):
# Shutdown a VPS.
uri = self._order_uri(node, 'vps')
self.connection.request(uri, method='DELETE')
# XXX check that the response was actually successful
return True
def create_node(self, **kwargs):
"""Creates a RimuHosting instance
@inherits: L{NodeDriver.create_node}
@keyword name: Must be a FQDN, e.g. example.com.
@type name: C{str}
@keyword ex_billing_oid: If not set,
a billing method is automatically picked.
@type ex_billing_oid: C{str}
@keyword ex_host_server_oid: The host server to set the VPS up on.
@type ex_host_server_oid: C{str}
@keyword ex_vps_order_oid_to_clone: Clone another VPS to use as
the image for the new VPS.
@type ex_vps_order_oid_to_clone: C{str}
@keyword ex_num_ips: Number of IPs to allocate. Defaults to 1.
@type ex_num_ips: C{int}
@keyword ex_extra_ip_reason: Reason for needing the extra IPs.
@type ex_extra_ip_reason: C{str}
@keyword ex_memory_mb: Memory to allocate to the VPS.
@type ex_memory_mb: C{int}
@keyword ex_disk_space_mb: Diskspace to allocate to the VPS.
Defaults to 4096 (4GB).
@type ex_disk_space_mb: C{int}
@keyword ex_disk_space_2_mb: Secondary disk size allocation.
Disabled by default.
@type ex_disk_space_2_mb: C{int}
@keyword ex_control_panel: Control panel to install on the VPS.
@type ex_control_panel: C{str}
"""
# Note we don't do much error checking in this because we
# expect the API to error out if there is a problem.
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
data = {
'instantiation_options': {
'domain_name': name, 'distro': image.id
},
'pricing_plan_code': size.id,
}
if 'ex_control_panel' in kwargs:
data['instantiation_options']['control_panel'] = \
kwargs['ex_control_panel']
if 'auth' in kwargs:
auth = kwargs['auth']
if not isinstance(auth, NodeAuthPassword):
raise ValueError('auth must be of NodeAuthPassword type')
data['instantiation_options']['password'] = auth.password
if 'ex_billing_oid' in kwargs:
#TODO check for valid oid.
data['billing_oid'] = kwargs['ex_billing_oid']
if 'ex_host_server_oid' in kwargs:
data['host_server_oid'] = kwargs['ex_host_server_oid']
if 'ex_vps_order_oid_to_clone' in kwargs:
data['vps_order_oid_to_clone'] = \
kwargs['ex_vps_order_oid_to_clone']
if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1:
if 'ex_extra_ip_reason' not in kwargs:
raise RimuHostingException(
'Need a reason for having an extra IP')
else:
if 'ip_request' not in data:
data['ip_request'] = {}
data['ip_request']['num_ips'] = int(kwargs['ex_num_ips'])
data['ip_request']['extra_ip_reason'] = \
kwargs['ex_extra_ip_reason']
if 'ex_memory_mb' in kwargs:
if 'vps_parameters' not in data:
data['vps_parameters'] = {}
data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb']
if 'ex_disk_space_mb' in kwargs:
if 'vps_parameters' not in data:
data['vps_parameters'] = {}
data['vps_parameters']['disk_space_mb'] = \
kwargs['ex_disk_space_mb']
if 'ex_disk_space_2_mb' in kwargs:
if 'vps_parameters' not in data:
data['vps_parameters'] = {}
data['vps_parameters']['disk_space_2_mb'] =\
kwargs['ex_disk_space_2_mb']
res = self.connection.request(
'/orders/new-vps',
method='POST',
data=json.dumps({"new-vps": data})
).object
node = self._to_node(res['about_order'])
node.extra['password'] = \
res['new_order_request']['instantiation_options']['password']
return node
def list_locations(self):
return [
NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self),
NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self),
NodeLocation('DCLONDON', "RimuHosting London", 'GB', self),
NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self),
]
features = {"create_node": ["password"]}
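# Minimal usage sketch (illustrative, not from the original source; the API key
# and the choice of the first size/image are placeholders):
#
#   driver = RimuHostingNodeDriver('my-api-key')
#   size = driver.list_sizes()[0]
#   image = driver.list_images()[0]
#   node = driver.create_node(name='vps.example.com', image=image, size=size)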
|
IsCoolEntertainment/debpkg_libcloud
|
libcloud/compute/drivers/rimuhosting.py
|
Python
|
apache-2.0
| 12,620
|
#!/usr/bin/env python
#
# @file CMakeListsFile.py
# @brief class for generating cmake lists file
# @author Frank Bergmann
# @author Sarah Keating
# @author Matthew S. Gillman
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from ..base_files import BaseTxtFile
from ..util import strFunctions, global_variables
class CMakeListsFile():
"""Class for CMake package files"""
def __init__(self, name, package):
self.package = package.lower()
self.fileout = BaseTxtFile.BaseTxtFile(name)
self.fileout.brief_description = \
'CMake build scripts for example programs'
self.cap_package = self.package.upper()
self.up_package = strFunctions.upper_first(self.package)
self.language = global_variables.language
self.cap_language = global_variables.language.upper()
self.open_br = '{'
self.close_br = '}'
########################################################################
# Write specific code
def write_examples(self):
fout = self.fileout
fout.skip_line()
fout.write_line('include_directories'
'(${0}CMAKE_CURRENT_SOURCE_DIR{1})'.
format(self.open_br, self.close_br))
fout.write_line('include_directories'
'(BEFORE ${0}LIBSBML_ROOT_SOURCE_DIR{1}/src)'.
format(self.open_br, self.close_br))
fout.write_line('include_directories'
'(${0}LIBSBML_ROOT_SOURCE_DIR{1}/include)'.
format(self.open_br, self.close_br))
fout.write_line('include_directories'
'(BEFORE ${0}LIBSBML_ROOT_BINARY_DIR{1}/src)'.
format(self.open_br, self.close_br))
fout.skip_line()
fout.write_line('if (EXTRA_INCLUDE_DIRS)')
fout.write_line('include_directories(${0}EXTRA_INCLUDE_DIRS{1})'.
format(self.open_br, self.close_br))
fout.write_line('endif (EXTRA_INCLUDE_DIRS)')
fout.skip_line()
fout.write_line('foreach (example')
fout.up_indent()
fout.skip_line()
fout.write_line('{0}_example1'.format(self.package))
fout.skip_line()
fout.down_indent()
fout.write_line(')')
fout.up_indent()
fout.write_line_verbatim(('add_executable'
'(example_{2}_cpp_${0}example{1}'
' ${0}example{1}.cpp ../util.c)')
.format(self.open_br,
self.close_br,
self.package))
fout.write_line_verbatim(('set_target_properties'
'(example_{2}_cpp_${0}example{1} '
'PROPERTIES OUTPUT_NAME '
'${0}example{1})').
format(self.open_br,
self.close_br,
self.package))
fout.write_line_verbatim(('target_link_libraries'
'(example_{2}_cpp_${0}example{1} '
'${0}LIBSBML_LIBRARY{1}-static)').
format(self.open_br,
self.close_br,
self.package))
fout.skip_line()
fout.write_line('if (WITH_LIBXML)')
fout.up_indent()
fout.write_line_verbatim(('target_link_libraries'
'(example_{2}_cpp_${0}example{1}'
' ${0}LIBXML_LIBRARY{1} '
'${0}EXTRA_LIBS{1})').
format(self.open_br,
self.close_br,
self.package))
fout.down_indent()
fout.write_line('endif(WITH_LIBXML)')
fout.skip_line()
fout.write_line('if (WITH_ZLIB)')
fout.up_indent()
fout.write_line_verbatim(('target_link_libraries'
'(example_{2}_cpp_${0}example{1}'
' ${0}LIBZ_LIBRARY{1})').
format(self.open_br,
self.close_br,
self.package))
fout.down_indent()
fout.write_line('endif(WITH_ZLIB)')
fout.skip_line()
fout.write_line('if (WITH_BZIP2)')
fout.up_indent()
fout.write_line_verbatim(('target_link_libraries'
'(example_{2}_cpp_${0}example{1}'
' ${0}LIBBZ_LIBRARY{1})').
format(self.open_br,
self.close_br,
self.package))
fout.down_indent()
fout.write_line('endif(WITH_BZIP2)')
fout.skip_line()
fout.down_indent()
fout.write_line('endforeach()')
fout.skip_line()
fout.write_line('# install c++ examples')
fout.write_line_verbatim('file(GLOB cpp_samples '
'\"${0}CMAKE_CURRENT_SOURCE_DIR{1}/*.c\"'.
format(self.open_br, self.close_br))
fout.write_line_verbatim(' '
'\"${0}CMAKE_CURRENT_SOURCE_DIR{1}/*.cpp\"'
.format(self.open_br, self.close_br))
fout.write_line_verbatim(' '
'\"${0}CMAKE_CURRENT_SOURCE_DIR{1}/*.h\")'.
format(self.open_br, self.close_br))
fout.write_line_verbatim(('install (FILES ${0}cpp_samples{1} '
'DESTINATION '
'${0}MISC_PREFIX{1}examples/c++/{2})').
format(self.open_br,
self.close_br,
self.package))
fout.skip_line()
########################################################################
# Write file
def write_file(self):
self.fileout.write_file()
self.write_examples()
def close_file(self):
self.fileout.close_file()
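# Minimal usage sketch (illustrative; 'spatial' and the output filename are placeholders):
#
#   cmake = CMakeListsFile('CMakeLists.txt', 'spatial')
#   cmake.write_file()
#   cmake.close_file()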
|
sbmlteam/deviser
|
deviser/cmake_files/CMakeListsFile.py
|
Python
|
lgpl-2.1
| 8,264
|
"""TESTS FOR SHOPPING LIST ITEMS"""
import unittest
from datetime import date
from app import shopping_lists_items
class TestCasesItems(unittest.TestCase):
"""TESTS FOR ITEMS CREATION AND BEHAVIOUR"""
def setUp(self):
self.item = shopping_lists_items.ShoppingListItems()
def tearDown(self):
del self.item
def test_successful_add_item(self):
"""CHECKS WHETHER AN ITEM CAN BE ADDED SUCCESSFULLY"""
msg = self.item.add(
"Party", "Whisky", "dalton@yahoo.com")
self.assertEqual(
msg, [{'user': 'dalton@yahoo.com',
'list': 'Party',
'name': 'Whisky',
'number': 1,
'date':str(date.today())}])
def test_invalid_characters(self):
"""TESTS IF CODE ACCEPTS INVALID CHARACTERS"""
msg = self.item.add(
"Party", "Whisky!", "dalton@yahoo.com")
self.assertEqual(msg, "Invalid characters")
def test_successful_edit_item(self):
"""CHECKS FOR SUCCESSFUL ITEM EDITING"""
self.item.list_of_shopping_list_items = [{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Snacks'},
{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Booze'}]
msg = self.item.edit('Soda', 'Booze', 'Adventure', "dalton@yahoo.com")
self.assertEqual(msg, [{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Snacks'},
{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Soda'}])
def test_edit_existing_item(self):
"""Check if edit name provided is similar to an existing item
"""
self.item.list_of_shopping_list_items = [{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Snacks'},
{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Booze'}]
msg = self.item.edit(
'Snacks', 'Booze', 'Adventure', "dalton@yahoo.com")
self.assertEqual(msg, "Name already used on another item")
if __name__ == '__main__':
unittest.main()
|
parseendavid/Andela-Developer-Challenge---Shopping-List-V2.0
|
tests/items_test.py
|
Python
|
mit
| 2,665
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import OrderedDict
from calibre.ebooks.docx.block_styles import ( # noqa
inherit, simple_color, LINE_STYLES, simple_float, binary_property, read_shd)
# Read from XML {{{
def read_text_border(parent, dest, XPath, get):
border_color = border_style = border_width = padding = inherit
elems = XPath('./w:bdr')(parent)
if elems and elems[0].attrib:
border_color = simple_color('auto')
border_style = 'none'
border_width = 1
for elem in elems:
color = get(elem, 'w:color')
if color is not None:
border_color = simple_color(color)
style = get(elem, 'w:val')
if style is not None:
border_style = LINE_STYLES.get(style, 'solid')
space = get(elem, 'w:space')
if space is not None:
try:
padding = float(space)
except (ValueError, TypeError):
pass
sz = get(elem, 'w:sz')
if sz is not None:
# we don't care about art borders (they are only used for page borders)
try:
# A border of less than 1pt is not rendered by WebKit
border_width = min(96, max(8, float(sz))) / 8
except (ValueError, TypeError):
pass
setattr(dest, 'border_color', border_color)
setattr(dest, 'border_style', border_style)
setattr(dest, 'border_width', border_width)
setattr(dest, 'padding', padding)
def read_color(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:color[@w:val]')(parent):
val = get(col, 'w:val')
if not val:
continue
ans = simple_color(val)
setattr(dest, 'color', ans)
def convert_highlight_color(val):
return {
'darkBlue': '#000080', 'darkCyan': '#008080', 'darkGray': '#808080',
'darkGreen': '#008000', 'darkMagenta': '#800080', 'darkRed': '#800000', 'darkYellow': '#808000',
'lightGray': '#c0c0c0'}.get(val, val)
def read_highlight(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:highlight[@w:val]')(parent):
val = get(col, 'w:val')
if not val:
continue
if val == 'none':
val = 'transparent'
else:
val = convert_highlight_color(val)
ans = val
setattr(dest, 'highlight', ans)
def read_lang(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:lang[@w:val]')(parent):
val = get(col, 'w:val')
if not val:
continue
try:
code = int(val, 16)
except (ValueError, TypeError):
ans = val
else:
from calibre.ebooks.docx.lcid import lcid
val = lcid.get(code, None)
if val:
ans = val
setattr(dest, 'lang', ans)
def read_letter_spacing(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:spacing[@w:val]')(parent):
val = simple_float(get(col, 'w:val'), 0.05)
if val is not None:
ans = val
setattr(dest, 'letter_spacing', ans)
def read_sz(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:sz[@w:val]')(parent):
val = simple_float(get(col, 'w:val'), 0.5)
if val is not None:
ans = val
setattr(dest, 'font_size', ans)
def read_underline(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:u[@w:val]')(parent):
val = get(col, 'w:val')
if val:
ans = val if val == 'none' else 'underline'
setattr(dest, 'text_decoration', ans)
def read_vert_align(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:vertAlign[@w:val]')(parent):
val = get(col, 'w:val')
if val and val in {'baseline', 'subscript', 'superscript'}:
ans = val
setattr(dest, 'vert_align', ans)
def read_position(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:position[@w:val]')(parent):
val = get(col, 'w:val')
try:
ans = float(val)/2.0
except Exception:
pass
setattr(dest, 'position', ans)
def read_font_family(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:rFonts')(parent):
val = get(col, 'w:asciiTheme')
if val:
val = '|%s|' % val
else:
val = get(col, 'w:ascii')
if val:
ans = val
setattr(dest, 'font_family', ans)
# }}}
class RunStyle(object):
all_properties = {
'b', 'bCs', 'caps', 'cs', 'dstrike', 'emboss', 'i', 'iCs', 'imprint',
'rtl', 'shadow', 'smallCaps', 'strike', 'vanish', 'webHidden',
'border_color', 'border_style', 'border_width', 'padding', 'color', 'highlight', 'background_color',
'letter_spacing', 'font_size', 'text_decoration', 'vert_align', 'lang', 'font_family', 'position',
}
toggle_properties = {
'b', 'bCs', 'caps', 'emboss', 'i', 'iCs', 'imprint', 'shadow', 'smallCaps', 'strike', 'dstrike', 'vanish',
}
def __init__(self, namespace, rPr=None):
self.namespace = namespace
self.linked_style = None
if rPr is None:
for p in self.all_properties:
setattr(self, p, inherit)
else:
for p in (
'b', 'bCs', 'caps', 'cs', 'dstrike', 'emboss', 'i', 'iCs', 'imprint', 'rtl', 'shadow',
'smallCaps', 'strike', 'vanish', 'webHidden',
):
setattr(self, p, binary_property(rPr, p, namespace.XPath, namespace.get))
for x in ('text_border', 'color', 'highlight', 'shd', 'letter_spacing', 'sz', 'underline', 'vert_align', 'position', 'lang', 'font_family'):
f = globals()['read_%s' % x]
f(rPr, self, namespace.XPath, namespace.get)
for s in namespace.XPath('./w:rStyle[@w:val]')(rPr):
self.linked_style = namespace.get(s, 'w:val')
self._css = None
def update(self, other):
for prop in self.all_properties:
nval = getattr(other, prop)
if nval is not inherit:
setattr(self, prop, nval)
if other.linked_style is not None:
self.linked_style = other.linked_style
def resolve_based_on(self, parent):
for p in self.all_properties:
val = getattr(self, p)
if val is inherit:
setattr(self, p, getattr(parent, p))
def get_border_css(self, ans):
for x in ('color', 'style', 'width'):
val = getattr(self, 'border_'+x)
if x == 'width' and val is not inherit:
val = '%.3gpt' % val
if val is not inherit:
ans['border-%s' % x] = val
def clear_border_css(self):
for x in ('color', 'style', 'width'):
setattr(self, 'border_'+x, inherit)
@property
def css(self):
if self._css is None:
c = self._css = OrderedDict()
td = set()
if self.text_decoration is not inherit:
td.add(self.text_decoration)
if self.strike and self.strike is not inherit:
td.add('line-through')
if self.dstrike and self.dstrike is not inherit:
td.add('line-through')
if td:
c['text-decoration'] = ' '.join(td)
if self.caps is True:
c['text-transform'] = 'uppercase'
if self.i is True:
c['font-style'] = 'italic'
if self.shadow and self.shadow is not inherit:
c['text-shadow'] = '2px 2px'
if self.smallCaps is True:
c['font-variant'] = 'small-caps'
if self.vanish is True or self.webHidden is True:
c['display'] = 'none'
self.get_border_css(c)
if self.padding is not inherit:
c['padding'] = '%.3gpt' % self.padding
for x in ('color', 'background_color'):
val = getattr(self, x)
if val is not inherit:
c[x.replace('_', '-')] = val
for x in ('letter_spacing', 'font_size'):
val = getattr(self, x)
if val is not inherit:
c[x.replace('_', '-')] = '%.3gpt' % val
if self.position is not inherit:
c['vertical-align'] = '%.3gpt' % self.position
if self.highlight is not inherit and self.highlight != 'transparent':
c['background-color'] = self.highlight
if self.b:
c['font-weight'] = 'bold'
if self.font_family is not inherit:
c['font-family'] = self.font_family
return self._css
def same_border(self, other):
return self.get_border_css({}) == other.get_border_css({})
|
ashang/calibre
|
src/calibre/ebooks/docx/char_styles.py
|
Python
|
gpl-3.0
| 9,147
|
# Reserved constant used as key in other_args_to_resolve to configure if we
# return sync or async handle of a deployment.
# True -> RayServeSyncHandle
# False -> RayServeHandle
USE_SYNC_HANDLE_KEY = "__use_sync_handle__"
|
ray-project/ray
|
python/ray/serve/pipeline/constants.py
|
Python
|
apache-2.0
| 222
|
# Copyright 2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
class ShelveUnicodeWrapper(object):
"""
Convert unicode to str and back again, since python-2.x shelve
module doesn't support unicode.
"""
def __init__(self, shelve_instance):
self._shelve = shelve_instance
def _encode(self, s):
if isinstance(s, unicode):
s = s.encode('utf_8')
return s
def __len__(self):
return len(self._shelve)
def __contains__(self, k):
return self._encode(k) in self._shelve
def __iter__(self):
return self._shelve.__iter__()
def items(self):
return self._shelve.iteritems()
def __setitem__(self, k, v):
self._shelve[self._encode(k)] = self._encode(v)
def __getitem__(self, k):
return self._shelve[self._encode(k)]
def __delitem__(self, k):
del self._shelve[self._encode(k)]
def get(self, k, *args):
return self._shelve.get(self._encode(k), *args)
def close(self):
self._shelve.close()
def clear(self):
self._shelve.clear()
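# Illustrative usage under python-2.x (the shelve filename is a placeholder):
#
#   import shelve
#   db = ShelveUnicodeWrapper(shelve.open('cache.shelve'))
#   db[u'key'] = u'value'   # unicode is encoded to utf-8 str on the way in
#   print db.get(u'key')
#   db.close()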
|
funtoo/portage-funtoo
|
pym/portage/util/_ShelveUnicodeWrapper.py
|
Python
|
gpl-2.0
| 1,005
|
import sys
import argparse
from itertools import izip
import math
def parseArgument():
# Parse the input
parser = argparse.ArgumentParser(description = "Make regions with 0 signal the average of their surrounding regions")
parser.add_argument("--signalsFileName", required=True, help='Signals file')
parser.add_argument("--peakIndexesFileName", required=True, help='Peak indexes file')
parser.add_argument("--outputFileName", required=True, help='Output file, where signals that were 0 will be the average of their surrounding signals')
options = parser.parse_args();
return options
def averageZeroSignalsWithinPeaks(options):
signalsFile = open(options.signalsFileName)
peakIndexesFile = open(options.peakIndexesFileName)
outputFile = open(options.outputFileName, 'w+')
lastSignal = None
lastLastSignal = None
lastPeakIndex = None
lastLastPeakIndex = None
for signalsLine, peakIndexesLine in izip(signalsFile, peakIndexesFile):
# Iterate through the signals and set those that are zero to the average of those of the surrounding regions
signal = float(signalsLine.strip())
peakIndex = int(peakIndexesLine.strip())
if lastSignal == 0:
# The previous signal was a zero, so set it to the average of the surrounding signals
if (peakIndex == lastPeakIndex) and (not math.isnan(lastSignal)):
# Include the current region in the average
if (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
# Include the region before the previous region in the average
if not math.isnan(signal):
# The current signal is not a nan, so include it in the average
lastSignalCorrected = (signal + lastLastSignal)/2.0
outputFile.write(str(lastSignalCorrected) + "\n")
else:
# The current signal is a nan, so use only the previous signal
outputFile.write(str(lastLastSignal) + "\n")
elif not math.isnan(signal):
outputFile.write(str(signal) + "\n")
else:
outputFile.write(str(lastSignal) + "\n")
elif (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
# Set the output to the region before it
outputFile.write(str(lastLastSignal) + "\n")
else:
outputFile.write(str(lastSignal) + "\n")
if signal != 0:
# The current signal is not 0, so record it
outputFile.write(str(signal) + "\n")
lastLastSignal = lastSignal
lastLastPeakIndex = lastPeakIndex
lastSignal = signal
lastPeakIndex = peakIndex
if lastSignal == 0:
# The final signal was a zero, so set it to the signal before it
if (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
# Set the output to the region before it
outputFile.write(str(lastLastSignal) + "\n")
else:
outputFile.write(str(lastSignal) + "\n")
signalsFile.close()
peakIndexesFile.close()
outputFile.close()
if __name__=="__main__":
options = parseArgument()
averageZeroSignalsWithinPeaks(options)
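# Worked example (illustrative): for signals 5.0, 0.0, 7.0 that share one peak
# index, the middle zero is rewritten as (5.0 + 7.0) / 2 = 6.0, giving an output
# of 5.0, 6.0, 7.0; a trailing zero with no following region in the same peak
# falls back to the preceding signal instead.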
|
imk1/IMKTFBindingCode
|
averageZeroSignalsWithinPeaks.py
|
Python
|
mit
| 2,918
|
# -*- encoding: utf-8 -*-
'''
Created on 18/05/2013
@author: romuloigor
'''
import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout
from django.core.cache import cache, get_cache
from django.utils.importlib import import_module
class SessionExpiredMiddleware:
def process_request(self, request):
if settings.ENABLE_SESSIONS_TIMEOUT:
if request.user.is_authenticated():
current_datetime = datetime.datetime.now()
if 'last_activity' in request.session and (current_datetime - request.session['last_activity']).seconds > settings.SESSION_TIMEOUT:
messages.add_message(request, messages.ERROR, 'Your session was closed after a period of inactivity!')
logout(request)
else:
request.session['last_activity'] = current_datetime
return None
class MultiLoginRestrictMiddleware(object):
def process_request(self, request):
if settings.ENABLE_SIMULTANEOUS_SESSIONS_LOGINS:
if request.user.is_authenticated():
cache = get_cache('default')
cache_timeout = settings.SESSION_TIMEOUT
cache_key = "user_pk_%s_restrict" % request.user.pk
cache_value = cache.get(cache_key)
if cache_value is not None:
if request.session.session_key != cache_value:
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore(session_key=cache_value)
session.delete()
cache.set(cache_key, request.session.session_key, cache_timeout)
else:
cache.set(cache_key, request.session.session_key, cache_timeout)
return None
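# Settings these middleware classes rely on (the names are taken from the code
# above; the example values are assumptions, not from the original project):
#
#   ENABLE_SESSIONS_TIMEOUT = True
#   ENABLE_SIMULTANEOUS_SESSIONS_LOGINS = True
#   SESSION_TIMEOUT = 30 * 60   # seconds of inactivity / cache timeout
#   SESSION_ENGINE = 'django.contrib.sessions.backends.db'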
|
solidit/myproject
|
portal/middleware/multilogin_restrict.py
|
Python
|
unlicense
| 1,882
|
import pytest
from test.conftest import got_postgresql
@pytest.mark.skipif(not got_postgresql(), reason='Needs postgresql')
class TestSqlitePostgresSchemaEquivalence(object):
def test_equality(self, schema_sl, schema_pg):
sl_table_names = set([t.name for t in schema_sl.tables])
pg_table_names = set([t.name for t in schema_pg.tables])
assert sl_table_names == pg_table_names
for table_name in pg_table_names:
sl_table = schema_sl.tables_by_name[table_name]
pg_table = schema_pg.tables_by_name[table_name]
sl_col_names = set([c.name for c in sl_table.cols])
pg_col_names = set([c.name for c in pg_table.cols])
assert sl_col_names == pg_col_names
def remove_name_from_relations(relations):
for r in relations:
del r['name']
return relations
sl_relations = remove_name_from_relations(schema_sl.relations())
pg_relations = remove_name_from_relations(schema_pg.relations())
assert sl_relations == pg_relations
|
freewilll/abridger
|
test/postgresql/test_sqlite_postgres_schema_equivalence.py
|
Python
|
mit
| 1,077
|
import os
import re
import json
import datetime
import collections
from pycmark.util.enum import enum
################################################################################
ActionEnum = enum(['Created', 'Renamed', 'Updated', 'Deleted'], name='ActionEnum')
################################################################################
class Action(collections.namedtuple('Action', ['time', 'action', 'note', 'extra'])):
"""Container class for note history updates"""
def __repr__(self):
t = self.time.strftime('%Y-%m-%d %H:%M')
action = ActionEnum[self.action]
if self.action != ActionEnum.Renamed:
return '%s %s %s' % (t, action, self.note.mdFile.__repr__())
else:
return '%s %s %s -> %s' % (t, action, self.extra.__repr__(), self.note.mdFile.__repr__())
@property
def guid(self):
return self.note.note['guid']
################################################################################
class HistoryHandler(ActionEnum.Visitor):
"""Visitor class to process EvernoteMetadata note history"""
def __init__(self, interleave=False):
super(HistoryHandler, self).__init__()
self.active_notes = {}
self.ignored = {}
self.interleave = interleave
self.history = []
def _add(self, action):
if action.note.mdFile not in self.active_notes:
self.active_notes[action.note.mdFile] = {action.guid}
else:
self.active_notes[action.note.mdFile].add(action.guid)
self.history.append(action)
def visitCreated(self, action):
self._add(action)
def visitDeleted(self, action):
ref = self.active_notes[action.note.mdFile]
if len(ref) == 1: # only one reference, so delete is unambiguous
del self.active_notes[action.note.mdFile]
self.history.append(action)
else: # ambiguous delete
ref.remove(action.guid)
if self.interleave: # just ignore deletion if interleaving
pass
else: # otherwise strip duplicate note from history
self.history = [h for h in self.history if h.guid != action.guid]
def visitRenamed(self, action):
ref = self.active_notes[action.extra]
if len(ref) == 1:
del self.active_notes[action.extra]
else:
raise NotImplementedError("Logic not yet implemented for ambiguous rename")
self._add(action)
def visitUpdated(self, action):
self.history.append(action)
################################################################################
class EvernoteMetadata(object):
"""Container for evernote synchronization metadata"""
def __init__(self, syncdata):
assert os.path.isdir(syncdata)
assert os.path.isfile(os.path.join(syncdata, 'metadata.json'))
# load metadata
self.syncdata = syncdata
with open(os.path.join(syncdata, 'metadata.json'), 'rt') as F:
self.metadata = json.load(F)
# extract metadata. use history so most recent version is reflected
self.tags = {}
for tag in sorted(self.metadata['tags'], key=lambda t: t['updateSequenceNum']):
self.tags[tag['guid']] = tag
self.notebooks = {}
for nb in sorted(self.metadata['notebooks'], key=lambda n: n['updateSequenceNum']):
self.notebooks[nb['guid']] = nb
self.notes = {}
for note in sorted(self.metadata['notes'], key=lambda n: n['updateSequenceNum']):
self.notes[note['guid']] = EvernoteNote(note, self.tags, self.notebooks, self.syncdata)
def history(self, tagFilter='markdown', ignore_empty=True):
"""Sequence of Actions reflecting note history"""
history = []
for guid, note in self.notes.items():
if tagFilter is None or tagFilter in note.tags:
versions = sorted(note.versions(), key=lambda v: v.updatedTime)
if ignore_empty:
versions = [v for v in versions if v.textContent() != '']
if len(versions) == 0:
continue
history.append(Action(note.createdTime, ActionEnum.Created, versions[0], None))
previous = versions[0]
for i, v in enumerate(versions[1:]):
if v.updatedTime > note.createdTime:
if v.mdFile != previous.mdFile:
history.append(Action(v.updatedTime, ActionEnum.Renamed, v, previous.mdFile))
else:
history.append(Action(v.updatedTime, ActionEnum.Updated, v, None))
previous = v
if note.deleted:
history.append(Action(note.deletedTime, ActionEnum.Deleted, versions[-1], None))
return history
def cleanHistory(self, sort=True, **kwargs):
"""Sequence of Actions reflecting note history with duplicates stripped"""
history = self.history(**kwargs)
if sort:
history.sort()
hh = HistoryHandler()
for entry in history:
hh.visit(entry.action, entry)
return hh.history
################################################################################
class EvernoteNote(object):
def __init__(self, note, tags, notebooks, syncdata, title=None, updated=None, sequence=None):
self.note = note
self._tags = tags
self._notebooks = notebooks
self._syncdata = syncdata
self.updated = note['updated'] if updated is None else updated
self.sequence = note['updateSequenceNum'] if sequence is None else sequence
self.title = note['title'] if title is None else title
def __repr__(self):
return 'title: {}, notebook: {}, tags: {}'.format(self.title, self.notebook, self.tags)
def __lt__(self, other):
return self.updated < other.updated
@property
def tags(self):
return [] if self.note['tagGuids'] is None else [self._tags[g]['name'] for g in self.note['tagGuids']]
@property
def notebook(self):
return self._notebooks[self.note['notebookGuid']]['name']
@property
def location(self):
return os.path.join(self._syncdata, 'notes', self.note['guid'])
@property
def deleted(self):
return self.note['deleted'] is not None
@property
def escapedtitle(self):
return self.title.replace('/', '%2f')
@property
def createdTime(self):
return datetime.datetime.utcfromtimestamp(self.note['created'] / 1000.0)
@property
def updatedTime(self):
return datetime.datetime.utcfromtimestamp(self.updated / 1000.0)
@property
def deletedTime(self):
return datetime.datetime.utcfromtimestamp(self.note['deleted'] / 1000.0)
@property
def mdFile(self):
return os.path.join(self.notebook, self.title.replace('/', '_') + '.md')
def versions(self):
out = [self]
with open(os.path.join(self.location, 'versions.json'), 'rt') as F:
for v in json.load(F):
out.append(self.__class__(self.note, self._tags, self._notebooks, self._syncdata,
title=v['title'], updated=v['updated'], sequence=v['updateSequenceNum']))
return out
def content(self):
with open(os.path.join(self.location, '%d.xml' % self.sequence), 'rt') as F:
return F.read()
def textContent(self):
return self.markdownContent(self.content())
@classmethod
def markdownContent(cls, note_xml):
a = note_xml.find('>', note_xml.find('<en-note')) + 1
b = note_xml.find('</en-note>', a)
return cls.clearNoteFormatting(note_xml[a:b])
@staticmethod
def clearNoteFormatting(en_note):
"""Convert Evernote note content to plain text"""
assert isinstance(en_note, str) # assuming we are working with an ascii string, not unicode
en_note = re.sub(r'\s*<span.*?>([\S\s]*?)<\/span>\s*', r'\1', en_note) # clear span tags
en_note = en_note.replace('\n', '') # clear newlines
en_note = re.sub(r'<\/div>[\s\n]*<div>', r'</div><div>', en_note) # clear whitespace between div tags
en_note = re.sub(r'<div><br.*?><\/div>', r'</div><div>', en_note) # convert <div><br/></div> to <div></div>
en_note = re.sub(r'(<\/?div>){1,2}', r'\n', en_note) # convert <div> boundaries to newlines
en_note = re.sub(r'<br.*?>', r'\n', en_note) # convert <br> to newlines
en_note = re.sub(r'<.*?>', r'', en_note) # strip any remaining tags
# en_note = en_note.replace(u'\u00A0', ' ') # non-breaking spaces to spaces
en_note = en_note.replace('\xc2\xa0', ' ') # non-breaking spaces to spaces
en_note = en_note.replace('&nbsp;' , ' ') # &nbsp; -> ' '
en_note = en_note.replace('&lt;' , '<') # &lt; -> '<'
en_note = en_note.replace('&gt;' , '>') # &gt; -> '>'
en_note = en_note.replace('&apos;' , "'") # &apos; -> "'"
en_note = en_note.replace('&quot;' , '"') # &quot; -> '"'
en_note = en_note.replace('&#124;' , '|') # &#124; -> '|'
en_note = en_note.replace('&amp;' , '&') # &amp; -> '&'
en_note = re.sub(r'^\n', '', en_note) # clear leading newline
en_note = re.sub(r' +\n', '\n', en_note) # clear trailing whitespace
return en_note
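# Illustrative behaviour (assuming the entity decoding above):
#
#   clearNoteFormatting('<div>Hello &amp; world</div><div>bye</div>')
#   # -> 'Hello & world\nbye\n'  (div boundaries become newlines, entities decoded)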
################################################################################
|
daryl314/markdown-browser
|
pyevernote/EvernoteMetadata.py
|
Python
|
mit
| 10,044
|
# -*- coding: utf-8 -*-
"""
zine.docs.builder
~~~~~~~~~~~~~~~~~~~~~~
The documentation building system. This is only used by the
documentation building script.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import cPickle as pickle
from urlparse import urlparse
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.core import publish_parts
from docutils.writers import html4css1
_toc_re = re.compile(r'<!-- TOC -->(.*?)<!-- /TOC -->(?s)')
_toc_contents_re = re.compile(r'<ul[^>]*>(.*)</ul>(?s)')
def plugin_links_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return [nodes.comment('', 'PLUGIN_LINKS')]
plugin_links_directive.arguments = (0, 0, 0)
plugin_links_directive.content = 1
directives.register_directive('plugin_links', plugin_links_directive)
def is_relative_uri(uri):
if uri.startswith('/'):
return False
# there is no uri parser, but the url parser works mostly
return not urlparse(uri)[0]
class Translator(html4css1.HTMLTranslator):
pass
class DocumentationWriter(html4css1.Writer):
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = Translator
def generate_documentation(data):
toc = '\n\n..\n TOC\n\n.. contents::\n\n..\n /TOC'
parts = publish_parts(data + toc,
writer=DocumentationWriter(),
settings_overrides=dict(
initial_header_level=2,
field_name_limit=50
)
)
toc = None
body = parts['body']
match = _toc_re.search(body)
body = body[:match.start()] + body[match.end():]
match = _toc_contents_re.search(match.group(1))
if match is not None:
toc = match.group(1)
# just add the toc if there are at least two entries.
if toc.count('</li>') < 2:
toc = None
return {
'title': parts['title'],
'body': body,
'toc': toc
}
def walk(directory, callback=lambda filename: None):
"""Walk a directory and translate all the files in there."""
directory = os.path.normpath(directory)
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.endswith('.rst'):
relname = os.path.join(dirpath, filename)[len(directory) + 1:]
f = file(os.path.join(dirpath, filename))
try:
d = generate_documentation(f.read().decode('utf-8'))
finally:
f.close()
f = file(os.path.join(dirpath, filename[:-3] + 'page'), 'wb')
try:
pickle.dump(d, f, protocol=2)
finally:
f.close()
callback(relname)
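# Illustrative use (not part of the original module; 'docs' is a placeholder path):
#
#   def report(relname):
#       print relname
#   walk('docs', callback=report)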
|
mitsuhiko/zine
|
zine/docs/builder.py
|
Python
|
bsd-3-clause
| 2,947
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Virt management features
Copyright 2007, 2012 Red Hat, Inc
Michael DeHaan <michael.dehaan@gmail.com>
Seth Vidal <skvidal@fedoraproject.org>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: virt
short_description: Manages virtual machines supported by libvirt
description:
- Manages virtual machines supported by I(libvirt).
version_added: "0.2"
options:
name:
description:
- name of the guest VM being managed. Note that VM must be previously
defined with xml.
required: true
default: null
aliases: []
state:
description:
- Note that there may be some lag for state requests like C(shutdown)
since these refer only to VM states. After starting a guest, it may not
be immediately accessible.
required: false
choices: [ "running", "shutdown", "destroyed", "paused" ]
default: "no"
command:
description:
- in addition to state management, various non-idempotent commands are available. See examples
required: false
choices: ["create","status", "start", "stop", "pause", "unpause",
"shutdown", "undefine", "destroy", "get_xml", "autostart",
"freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
uri:
description:
- libvirt connection uri
required: false
default: qemu:///system
xml:
description:
- XML document used with the define command
required: false
default: null
requirements:
- "python >= 2.6"
- "libvirt-python"
author:
- "Ansible Core Team"
- "Michael DeHaan"
- "Seth Vidal"
'''
EXAMPLES = '''
# a playbook task line:
- virt: name=alpha state=running
# /usr/bin/ansible invocations
ansible host -m virt -a "name=alpha command=status"
ansible host -m virt -a "name=alpha command=get_xml"
ansible host -m virt -a "name=alpha command=create uri=lxc:///"
# a playbook example of defining and launching an LXC guest
tasks:
- name: define vm
virt: name=foo
command=define
xml="{{ lookup('template', 'container-template.xml.j2') }}"
uri=lxc:///
- name: start vm
virt: name=foo state=running uri=lxc:///
'''
RETURN = '''
# for list_vms command
list_vms:
description: The list of vms defined on the remote system
type: dictionary
returned: success
sample: [
"build.example.org",
"dev.example.org"
]
# for status command
status:
description: The status of the VM, among running, crashed, paused and shutdown
type: string
sample: "success"
returned: success
'''
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
import sys
try:
import libvirt
except ImportError:
HAS_VIRT = False
else:
HAS_VIRT = True
ALL_COMMANDS = []
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
VIRT_STATE_NAME_MAP = {
0 : "running",
1 : "running",
2 : "running",
3 : "paused",
4 : "shutdown",
5 : "shutdown",
6 : "crashed"
}
class VMNotFound(Exception):
pass
class LibvirtConnection(object):
def __init__(self, uri, module):
self.module = module
cmd = "uname -r"
rc, stdout, stderr = self.module.run_command(cmd)
if "xen" in stdout:
conn = libvirt.open(None)
elif "esx" in uri:
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
conn = libvirt.openAuth(uri, auth)
else:
conn = libvirt.open(uri)
if not conn:
raise Exception("hypervisor connection failure")
self.conn = conn
def find_vm(self, vmid):
"""
Extra bonus feature: vmid = -1 returns a list of everything
"""
conn = self.conn
vms = []
# this block of code borrowed from virt-manager:
# get working domain's name
ids = conn.listDomainsID()
for id in ids:
vm = conn.lookupByID(id)
vms.append(vm)
# get defined domain
names = conn.listDefinedDomains()
for name in names:
vm = conn.lookupByName(name)
vms.append(vm)
if vmid == -1:
return vms
for vm in vms:
if vm.name() == vmid:
return vm
raise VMNotFound("virtual machine %s not found" % vmid)
def shutdown(self, vmid):
return self.find_vm(vmid).shutdown()
def pause(self, vmid):
return self.suspend(vmid)
def unpause(self, vmid):
return self.resume(vmid)
def suspend(self, vmid):
return self.find_vm(vmid).suspend()
def resume(self, vmid):
return self.find_vm(vmid).resume()
def create(self, vmid):
return self.find_vm(vmid).create()
def destroy(self, vmid):
return self.find_vm(vmid).destroy()
def undefine(self, vmid):
return self.find_vm(vmid).undefine()
def get_status2(self, vm):
state = vm.info()[0]
return VIRT_STATE_NAME_MAP.get(state,"unknown")
def get_status(self, vmid):
state = self.find_vm(vmid).info()[0]
return VIRT_STATE_NAME_MAP.get(state,"unknown")
def nodeinfo(self):
return self.conn.getInfo()
def get_type(self):
return self.conn.getType()
def get_xml(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.XMLDesc(0)
def get_maxVcpus(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.maxVcpus()
def get_maxMemory(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.maxMemory()
def getFreeMemory(self):
return self.conn.getFreeMemory()
def get_autostart(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.autostart()
def set_autostart(self, vmid, val):
vm = self.conn.lookupByName(vmid)
return vm.setAutostart(val)
def define_from_xml(self, xml):
return self.conn.defineXML(xml)
class Virt(object):
def __init__(self, uri, module):
self.module = module
self.uri = uri
def __get_conn(self):
self.conn = LibvirtConnection(self.uri, self.module)
return self.conn
def get_vm(self, vmid):
self.__get_conn()
return self.conn.find_vm(vmid)
def state(self):
vms = self.list_vms()
state = []
for vm in vms:
state_blurb = self.conn.get_status(vm)
state.append("%s %s" % (vm,state_blurb))
return state
def info(self):
vms = self.list_vms()
info = dict()
for vm in vms:
data = self.conn.find_vm(vm).info()
# libvirt returns maxMem, memory, and cpuTime as long()'s, which
# xmlrpclib tries to convert to regular int's during serialization.
# This throws exceptions, so convert them to strings here and
# assume the other end of the xmlrpc connection can figure things
# out or doesn't care.
info[vm] = {
"state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
"maxMem" : str(data[1]),
"memory" : str(data[2]),
"nrVirtCpu" : data[3],
"cpuTime" : str(data[4]),
}
info[vm]["autostart"] = self.conn.get_autostart(vm)
return info
def nodeinfo(self):
self.__get_conn()
info = dict()
data = self.conn.nodeinfo()
info = {
"cpumodel" : str(data[0]),
"phymemory" : str(data[1]),
"cpus" : str(data[2]),
"cpumhz" : str(data[3]),
"numanodes" : str(data[4]),
"sockets" : str(data[5]),
"cpucores" : str(data[6]),
"cputhreads" : str(data[7])
}
return info
def list_vms(self, state=None):
self.conn = self.__get_conn()
vms = self.conn.find_vm(-1)
results = []
for x in vms:
try:
if state:
vmstate = self.conn.get_status2(x)
if vmstate == state:
results.append(x.name())
else:
results.append(x.name())
except:
pass
return results
def virttype(self):
return self.__get_conn().get_type()
def autostart(self, vmid):
self.conn = self.__get_conn()
return self.conn.set_autostart(vmid, True)
def freemem(self):
self.conn = self.__get_conn()
return self.conn.getFreeMemory()
def shutdown(self, vmid):
""" Make the machine with the given vmid stop running. Whatever that takes. """
self.__get_conn()
self.conn.shutdown(vmid)
return 0
def pause(self, vmid):
""" Pause the machine with the given vmid. """
self.__get_conn()
return self.conn.suspend(vmid)
def unpause(self, vmid):
""" Unpause the machine with the given vmid. """
self.__get_conn()
return self.conn.resume(vmid)
def create(self, vmid):
""" Start the machine via the given vmid """
self.__get_conn()
return self.conn.create(vmid)
def start(self, vmid):
""" Start the machine via the given id/name """
self.__get_conn()
return self.conn.create(vmid)
def destroy(self, vmid):
""" Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
self.__get_conn()
return self.conn.destroy(vmid)
def undefine(self, vmid):
""" Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """
self.__get_conn()
return self.conn.undefine(vmid)
def status(self, vmid):
"""
Return a state suitable for server consumption. Aka, codes.py values, not XM output.
"""
self.__get_conn()
return self.conn.get_status(vmid)
def get_xml(self, vmid):
"""
Receive a Vm id as input
Return an xml describing vm config returned by a libvirt call
"""
self.__get_conn()
return self.conn.get_xml(vmid)
def get_maxVcpus(self, vmid):
"""
Gets the max number of VCPUs on a guest
"""
self.__get_conn()
return self.conn.get_maxVcpus(vmid)
def get_max_memory(self, vmid):
"""
Gets the max memory on a guest
"""
self.__get_conn()
return self.conn.get_maxMemory(vmid)
def define(self, xml):
"""
Define a guest with the given xml
"""
self.__get_conn()
return self.conn.define_from_xml(xml)
def core(module):
state = module.params.get('state', None)
guest = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
v = Virt(uri, module)
res = {}
if state and command=='list_vms':
res = v.list_vms(state=state)
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
if state:
if not guest:
module.fail_json(msg = "state change requires a guest specified")
res['changed'] = False
if state == 'running':
if v.status(guest) == 'paused':
res['changed'] = True
res['msg'] = v.unpause(guest)
elif v.status(guest) != 'running':
res['changed'] = True
res['msg'] = v.start(guest)
elif state == 'shutdown':
if v.status(guest) != 'shutdown':
res['changed'] = True
res['msg'] = v.shutdown(guest)
elif state == 'destroyed':
if v.status(guest) != 'shutdown':
res['changed'] = True
res['msg'] = v.destroy(guest)
elif state == 'paused':
if v.status(guest) == 'running':
res['changed'] = True
res['msg'] = v.pause(guest)
else:
module.fail_json(msg="unexpected state")
return VIRT_SUCCESS, res
if command:
if command in VM_COMMANDS:
if not guest:
module.fail_json(msg = "%s requires 1 argument: guest" % command)
if command == 'define':
if not xml:
module.fail_json(msg = "define requires xml argument")
try:
v.get_vm(guest)
except VMNotFound:
v.define(xml)
res = {'changed': True, 'created': guest}
return VIRT_SUCCESS, res
res = getattr(v, command)(guest)
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
else:
module.fail_json(msg="Command %s not recognized" % basecmd)
module.fail_json(msg="expected state or command parameter to be specified")
def main():
module = AnsibleModule(argument_spec=dict(
name = dict(aliases=['guest']),
state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
))
if not HAS_VIRT:
module.fail_json(
msg='The `libvirt` module is not importable. Check the requirements.'
)
rc = VIRT_SUCCESS
try:
rc, result = core(module)
except Exception, e:
module.fail_json(msg=str(e))
if rc != 0: # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
milad-soufastai/ansible-modules-extras
|
cloud/misc/virt.py
|
Python
|
gpl-3.0
| 14,618
|
from lxml.builder import E
from jnpr.junos.utils.util import Util
from jnpr.junos.utils.start_shell import StartShell
class FS(Util):
"""
Filesystem (FS) utilities:
cat - show the contents of a file
checksum - calculate file checksum (md5,sha256,sha1)
cp - local file copy (not scp)
cwd - change working directory
ls - return file/dir listing
mkdir - create a directory
pwd - get working directory
mv - local file rename
rm - local file delete
rmdir - remove a directory
stat - return file/dir information
storage_usage - return storage usage
storage_cleanup - perform storage cleanup
storage_cleanup_check - returns a dict of files that would be removed by cleanup
symlink - create a symlink
tgz - tar+gzip a directory
NOTES:
The following methods require 'start shell' privileges:
[mkdir, rmdir, symlink]
"""
# -------------------------------------------------------------------------
# cat - show file contents
# -------------------------------------------------------------------------
def cat(self, path):
"""
returns the contents of the file :path:
"""
try:
rsp = self._dev.rpc.file_show(filename=path)
except:
return None
return rsp.text
# -------------------------------------------------------------------------
# cwd - change working directory
# -------------------------------------------------------------------------
def cwd(self, path):
"""
change working directory to path
"""
self._dev.rpc.set_cli_working_directory(directory=path)
# -------------------------------------------------------------------------
# pwd - return current working directory
# -------------------------------------------------------------------------
def pwd(self):
"""
returns the current working directory
"""
rsp = self._dev.rpc(E.command("show cli directory"))
return rsp.findtext('./working-directory')
# -------------------------------------------------------------------------
# checksum - compute file checksum
# -------------------------------------------------------------------------
def checksum(self, path, calc='md5'):
"""
performs the checksum command on the given file path using the
required calculation method ['md5', 'sha256', 'sha1'] and returns
the string value. if the :path: is not found on the device, then
None is returned.
"""
cmd_map = {
'md5': self._dev.rpc.get_checksum_information,
'sha256': self._dev.rpc.get_sha256_checksum_information,
'sha1': self._dev.rpc.get_sha1_checksum_information
}
rpc = cmd_map.get(calc)
if rpc is None:
raise ValueError("Unknown calculation method: '%s'" % calc)
try:
rsp = rpc(path=path)
return rsp.findtext('.//checksum').strip()
except:
# an exception here means the path was not found on the device
return None
@classmethod
def _decode_file(cls, fileinfo):
results = {}
not_file = fileinfo.xpath('file-directory | file-symlink-target')
if len(not_file):
results['type'] = {'file-directory': 'dir',
'file-symlink-target': 'link'}[not_file[0].tag]
if 'link' == results['type']:
results['link'] = not_file[0].text.strip()
else:
results['type'] = 'file'
results['path'] = fileinfo.findtext('file-name').strip()
results['owner'] = fileinfo.findtext('file-owner').strip()
results['size'] = int(fileinfo.findtext('file-size'))
fper = fileinfo.find('file-permissions')
results['permissions'] = int(fper.text.strip())
results['permissions_text'] = fper.get('format')
fdate = fileinfo.find('file-date')
results['ts_date'] = fdate.get('format')
results['ts_epoc'] = fdate.text.strip()
return results
@classmethod
def _decode_dir(cls, dirinfo, files=None):
results = {}
results['type'] = 'dir'
results['path'] = dirinfo.get('name')
if files is None:
files = dirinfo.xpath('file-information')
results['file_count'] = len(files)
results['size'] = sum([int(f.findtext('file-size')) for f in files])
return results
# -------------------------------------------------------------------------
# stat - file information
# -------------------------------------------------------------------------
def stat(self, path):
"""
Returns a dictionary of status information on the path, or None
if the path does not exist.
@@@ MORE NEEDED @@@
"""
rsp = self._dev.rpc.file_list(detail=True, path=path)
# if there is an output tag, then it means that the path
# was not found
if rsp.find('output') is not None:
return None
# ok, so we've either got a directory or a file at
# this point, so decode accordingly
xdir = rsp.find('directory')
if xdir.get('name'): # then this is a directory path
return FS._decode_dir(xdir)
else:
return FS._decode_file(xdir.find('file-information'))
# -------------------------------------------------------------------------
# ls - file/dir listing
# -------------------------------------------------------------------------
def ls(self, path='.', brief=False, followlink=True):
"""
File listing, returns a dict of file information. If the
path is a symlink, then by default (:followlink:) this method
is called recursively to obtain the symlink-specific
information.
"""
rsp = self._dev.rpc.file_list(detail=True, path=path)
# if there is an output tag, then it means that the path
# was not found, and we return :None:
if rsp.find('output') is not None:
return None
xdir = rsp.find('directory')
# check to see if the directory element has a :name:
# attribute, and if it does not, then this is a file, and
# decode accordingly. If the file is a symlink, then we
# want to follow the symlink to get what we want.
if not xdir.get('name'):
results = FS._decode_file(xdir.find('file-information'))
link_path = results.get('link')
if not link_path: # then we are done
return results
else:
return results if followlink is False else self.ls(
path=link_path)
# if we are here, then it's a directory, include information on all
# files
files = xdir.xpath('file-information')
results = FS._decode_dir(xdir, files)
if brief is True:
results['files'] = [f.findtext('file-name').strip() for f in files]
else:
results['files'] = {
f.findtext('file-name').strip(): FS._decode_file(f)
for f in files
}
return results
# -------------------------------------------------------------------------
# storage_usage - filesystem storage usage
# -------------------------------------------------------------------------
def storage_usage(self):
rsp = self._dev.rpc.get_system_storage()
_name = lambda fs: fs.findtext('filesystem-name').strip()
def _decode(fs):
r = {}
r['mount'] = fs.find('mounted-on').text.strip()
tb = fs.find('total-blocks')
r['total'] = tb.get('format')
r['total_blocks'] = int(tb.text)
ub = fs.find('used-blocks')
r['used'] = ub.get('format')
r['used_blocks'] = int(ub.text)
r['used_pct'] = fs.find('used-percent').text.strip()
ab = fs.find('available-blocks')
r['avail'] = ab.get('format')
r['avail_block'] = int(ab.text)
return r
return {_name(fs): _decode(fs) for fs in rsp.xpath('filesystem')}
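# The returned mapping is keyed by filesystem name; a single entry looks
# roughly like this (values are illustrative only):
#   {'/dev/ad0s1a': {'mount': '/', 'total': '496M', 'total_blocks': 1015812,
#                    'used': '315M', 'used_blocks': 644963, 'used_pct': '66',
#                    'avail': '142M', 'avail_block': 290184}}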
# -------------------------------------------------------------------------
### storage_cleanup_check, storage_cleanup
# -------------------------------------------------------------------------
@classmethod
def _decode_storage_cleanup(cls, files):
_name = lambda f: f.findtext('file-name').strip()
def _decode(f):
return {
'size': int(f.findtext('size')),
'ts_date': f.findtext('date').strip()
}
# return a dict of name/decode pairs for each file
return {_name(f): _decode(f) for f in files}
def storage_cleanup_check(self):
"""
Perform the 'request system storage cleanup dry-run' command
to return a :dict: of files/info that would be removed if
the cleanup command was executed.
"""
rsp = self._dev.rpc.request_system_storage_cleanup(dry_run=True)
files = rsp.xpath('file-list/file')
return FS._decode_storage_cleanup(files)
def storage_cleanup(self):
"""
Perform the 'request system storage cleanup' command to remove
files from the filesystem. Return a :dict: of file name/info
on the files that were removed.
"""
rsp = self._dev.rpc.request_system_storage_cleanup()
files = rsp.xpath('file-list/file')
return FS._decode_storage_cleanup(files)
# -------------------------------------------------------------------------
# rm - local file delete
# -------------------------------------------------------------------------
def rm(self, path):
"""
Performs a local file delete action, per Junos CLI command
"file delete". If the file does not exist, then this returns False.
"""
# the return value from this RPC will return either True if the delete
# was successful, or an XML structure otherwise. So we can do a simple
# test to provide the return result to the caller.
rsp = self._dev.rpc.file_delete(path=path)
if rsp is True:
return True
else:
return False
# -------------------------------------------------------------------------
# cp - local file copy
# -------------------------------------------------------------------------
def cp(self, from_path, to_path):
"""
Perform a local file copy where :from_path: and :to_path: can be any
valid Junos path argument. Refer to the Junos "file copy" command
documentation for details.
Returns True if OK, False if file does not exist.
"""
# this RPC returns True if it is OK. If the file does not exist
# this RPC will generate an RpcError exception, so just return False
try:
self._dev.rpc.file_copy(source=from_path, destination=to_path)
except:
return False
return True
# -------------------------------------------------------------------------
# mv - local file rename
# -------------------------------------------------------------------------
def mv(self, from_path, to_path):
"""
Perform a local file rename function, same as "file rename" Junos CLI.
"""
rsp = self._dev.rpc.file_rename(source=from_path, destination=to_path)
if rsp is True:
return True
else:
return False
def tgz(self, from_path, tgz_path):
"""
create a file called :tgz_path: that is the tar-gzip of the given
directory specified :from_path:
"""
rsp = self._dev.rpc.file_archive(compress=True,
source=from_path,
destination=tgz_path)
# if the rsp is True, then the command executed OK.
if rsp is True:
return True
# otherwise, return the error string to the caller
return rsp.text
# -------------------------------------------------------------------------
# !!!!! methods that use SSH shell commands; these require that the user
# !!!!! has 'start shell' privileges
# -------------------------------------------------------------------------
def _ssh_exec(self, command):
with StartShell(self._dev) as sh:
got = sh.run(command)
ok = sh.last_ok
return (ok, got)
def rmdir(self, path):
"""
~| REQUIRES SHELL PRIVILEGES |~
executes the 'rmdir' command on path
returns True if OK, or error string
"""
results = self._ssh_exec("rmdir %s" % path)
return True if results[0] is True else ''.join(results[1][2:-1])
def mkdir(self, path):
"""
~| REQUIRES SHELL PRIVILEGES |~
executes the 'mkdir -p' command on path
returns True if OK, or error string
"""
results = self._ssh_exec("mkdir -p %s" % path)
return True if results[0] is True else ''.join(results[1][2:-1])
def symlink(self, from_path, to_path):
"""
~| REQUIRES SHELL PRIVILEGES |~
executes the 'ln -sf <from_path> <to_path>' command
returns True if OK, or error string
"""
results = self._ssh_exec("ln -sf %s %s" % (from_path, to_path))
return True if results[0] is True else ''.join(results[1][2:-1])
|
dgjnpr/py-junos-eznc
|
lib/jnpr/junos/utils/fs.py
|
Python
|
apache-2.0
| 13,736
|
# -*- coding: utf-8 -*-
import os
from hashlib import md5
from django.conf import settings
from django.db import models
from knowledge_base.core.db.models import CatalogueMixin
def get_area_image_path(instance, filename):
"""
Get the upload path for the area image.
"""
return '{0}/{1}{2}'.format(
"areas/originals",
md5(filename).hexdigest(),
os.path.splitext(filename)[-1]
)
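# For illustration (the hash value depends on the name): a filename of
# "photo.jpg" maps to something like "areas/originals/<md5-of-filename>.jpg".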
def get_area_thumbnail_path(instance, filename):
"""
Get the upload path for the area thumbnail image.
"""
return '{0}/{1}'.format(
"areas/thumbnails",
filename
)
class Area(CatalogueMixin):
"""
Areas of the knowledge base.
"""
description = models.TextField(
max_length=250,
null=True,
blank=True
)
photo = models.ImageField(
null=True,
blank=True,
upload_to=get_area_image_path
)
thumbnail = models.ImageField(
null=True,
blank=True,
upload_to=get_area_thumbnail_path
)
@property
def thumbnail_settings(self):
"""
Property used to configure thumbnail creation settings.
"""
return {
"dimension": "300x200",
"original_field": "photo",
"thumbnail_field": "thumbnail"
}
class Meta:
ordering = ['name']
class Category(CatalogueMixin):
"""
Category that belongs to each area.
"""
area = models.ForeignKey(
Area,
related_name='categories'
)
description = models.TextField(
max_length=250,
null=True,
blank=True
)
class Meta:
verbose_name = 'category'
verbose_name_plural = 'categories'
ordering = ['area__name', 'name']
def __unicode__(self):
return self.get_full_name()
def get_full_name(self):
area = Area.objects.get(id=self.area_id)
return u"{0} > {1}".format(area.name, self.name)
class Subject(CatalogueMixin):
"""
Subject that belongs to each category.
"""
category = models.ForeignKey(
Category,
related_name='subjects',
default=1
)
description = models.TextField(
max_length=250,
null=True,
blank=True
)
class Meta:
ordering = [
'category__area__name',
'category__name',
'name'
]
def __unicode__(self):
return self.get_full_name()
def get_full_name(self):
category = Category.objects.get(id=self.category_id)
return u"{0} > {1}".format(category.name, self.name)
class Post(CatalogueMixin):
"""
Main model of the application; it essentially holds the information for a subject.
"""
subject = models.ForeignKey(
Subject,
related_name='posts'
)
resume = models.CharField(
max_length=150
)
content = models.TextField()
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='posts',
verbose_name='author'
)
available_to = models.ManyToManyField(
settings.AUTH_USER_MODEL,
blank=True,
related_name=u'available_posts',
verbose_name='users who can view the post.'
)
editable_to = models.ManyToManyField(
settings.AUTH_USER_MODEL,
blank=True,
related_name=u'editable_posts',
verbose_name='users who can edit the post.'
)
class Meta:
ordering = [
'subject__category__area__name',
'subject__category__name',
'subject__name',
'name'
]
|
jualjiman/knowledge-base
|
src/knowledge_base/posts/models.py
|
Python
|
apache-2.0
| 3,653
|
import warnings
try:
from jsoneditor.fields.django_jsonfield import JSONField as JSONFieldBase
except ImportError:
from django.db import models
class JSONFieldBase(models.TextField):
def __init__(self, *args, **kwargs):
warnings.warn(
'"jsoneditor" module not available, to enable json mode '
'please run: "pip install djongo[json]"', stacklevel=2)
models.TextField.__init__(self, *args, **kwargs)
class JSONField(JSONFieldBase):
def get_prep_value(self, value):
return value
|
nesdis/djongo
|
djongo/models/json.py
|
Python
|
agpl-3.0
| 570
|
from setuptools import setup, find_packages
setup(
name='freezegame',
version='0.1',
packages=find_packages(),
)
|
mattfister/freezegame
|
setup.py
|
Python
|
mit
| 127
|
def hanoi (n,s,t,b):
assert n > 0
if n == 1:
print 'move', s, 'to', t
else:
hanoi(n-1, s, b, t)
hanoi(1, s , t, b)
hanoi (n-1, b, t, s)
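# For example, hanoi(2, 'source', 'target', 'buffer') prints:
#   move source to buffer
#   move source to target
#   move buffer to target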
x = int(raw_input("Enter The Number Of Disks In The Tower Of Hanoi: "))
for i in range (1, x+1):
print 'New Hanoi Example: hanoi(',i,', source, target, buffer)'
print '________________________________________________________'
hanoi(i, 'source', 'target', 'buffer')
print ' '
|
AlanJudi/Alanjudi
|
towerHanoi.py
|
Python
|
gpl-3.0
| 475
|
z = lambda x, y=1, *args, **kwargs: x * y + sum(args) * kwargs.get("k", 1)
z(<arg1>1, <arg2>2, <arg3>4, <arg4>k=5)
|
asedunov/intellij-community
|
python/testData/paramInfo/LambdaVariousArgs.py
|
Python
|
apache-2.0
| 115
|
"""Urls for feedgrabber authors"""
from django.conf.urls.defaults import *
from feedgrabber.models import Author
author_conf = {'queryset': Author.objects.all()}
urlpatterns = patterns('django.views.generic.list_detail',
url(r'^$', 'object_list',
author_conf, 'feedgrabber_author_list'),
)
urlpatterns += patterns('feedgrabber.views.authors',
url(r'^(?P<slug>[-\w]+)/$', 'view_author_detail',
name='feedgrabber_author_detail'),
)
|
Fantomas42/django-feedgrabber
|
feedgrabber/urls/authors.py
|
Python
|
bsd-3-clause
| 583
|
__author__ = "Jordan Anderson"
__email__ = "jodog59@gmail.com"
from pynq import MMIO
from pynq import PL
class Register(object):
"""This class controls the Registers near the Video PR.
Attributes
----------
reg : int
The index of the register, starting from 0.
reg_offset : int
The byte offset from the base address to the given register
"""
_mmio = None
def __init__(self, reg):
"""Create a new register object.
Parameters
----------
reg : int
Index of the register, from 0 to 7.
"""
if reg not in range(8):
raise ValueError("Index for registers should be 0 - 7.")
self.reg = reg
self.reg_offset = reg * 4
if Register._mmio is None:
Register._mmio = MMIO(int(PL.ip_dict["SEG_Video_PR_0_S_AXI_reg"][0],16),32)
def write(self, data):
"""Set the register value according to the input data.
Parameters
----------
data : int
This parameter can be any 32 bit value
"""
Register._mmio.write(self.reg_offset,data)
def read(self):
"""Read the value in the register.
Returns
-------
int
A 32-bit value contained within the register
"""
return Register._mmio.read(self.reg_offset)
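# A minimal usage sketch (assumes the overlay exposing
# "SEG_Video_PR_0_S_AXI_reg" is already loaded on the board):
#
#   r0 = Register(0)
#   r0.write(0xFF)
#   value = r0.read()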
|
AEW2015/PYNQ_PR_Overlay
|
python/pynq/board/registers.py
|
Python
|
bsd-3-clause
| 1,397
|
"""
Low level *Skype for Linux* interface implemented using *XWindows messaging*.
Uses direct *Xlib* calls through *ctypes* module.
This module handles the options that you can pass to `Skype.__init__`
for Linux machines when the transport is set to *X11*.
No further options are currently supported.
Warning PyGTK framework users
=============================
The multithreaded architecture of Skype4Py requires a special treatment
if the Xlib transport is combined with PyGTK GUI framework.
The following code has to be called at the top of your script, before
PyGTK is even imported.
.. python::
from Skype4Py.api.posix_x11 import threads_init
threads_init()
This function enables multithreading support in Xlib and GDK. If it is not done
here, multithreading support is enabled for the Xlib library when the `Skype` object is instantiated.
If your script imports the PyGTK module, doing this so late may lead to a
segmentation fault when the GUI is shown on the screen.
A remedy is to enable the multithreading support before PyGTK is imported
by calling the ``threads_init`` function.
"""
__docformat__ = 'restructuredtext en'
import sys
import threading
import os
from ctypes import *
from ctypes.util import find_library
import time
import logging
from Skype4Py.api import Command, SkypeAPIBase, \
timeout2float, finalize_opts
from Skype4Py.enums import *
from Skype4Py.errors import SkypeAPIError
__all__ = ['SkypeAPI', 'threads_init']
# The Xlib Programming Manual:
# ============================
# http://tronche.com/gui/x/xlib/
# some Xlib constants
PropertyChangeMask = 0x400000
PropertyNotify = 28
ClientMessage = 33
PropertyNewValue = 0
PropertyDelete = 1
# some Xlib types
c_ulong_p = POINTER(c_ulong)
DisplayP = c_void_p
Atom = c_ulong
AtomP = c_ulong_p
XID = c_ulong
Window = XID
Bool = c_int
Status = c_int
Time = c_ulong
c_int_p = POINTER(c_int)
# should the structures be aligned to 8 bytes?
align = (sizeof(c_long) == 8 and sizeof(c_int) == 4)
# some Xlib structures
class XClientMessageEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('serial', c_ulong),
('send_event', Bool),
('pad1', c_int),
('display', DisplayP),
('window', Window),
('message_type', Atom),
('format', c_int),
('pad2', c_int),
('data', c_char * 20)]
else:
_fields_ = [('type', c_int),
('serial', c_ulong),
('send_event', Bool),
('display', DisplayP),
('window', Window),
('message_type', Atom),
('format', c_int),
('data', c_char * 20)]
class XPropertyEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('serial', c_ulong),
('send_event', Bool),
('pad1', c_int),
('display', DisplayP),
('window', Window),
('atom', Atom),
('time', Time),
('state', c_int),
('pad2', c_int)]
else:
_fields_ = [('type', c_int),
('serial', c_ulong),
('send_event', Bool),
('display', DisplayP),
('window', Window),
('atom', Atom),
('time', Time),
('state', c_int)]
class XErrorEvent(Structure):
if align:
_fields_ = [('type', c_int),
('pad0', c_int),
('display', DisplayP),
('resourceid', XID),
('serial', c_ulong),
('error_code', c_ubyte),
('request_code', c_ubyte),
('minor_code', c_ubyte)]
else:
_fields_ = [('type', c_int),
('display', DisplayP),
('resourceid', XID),
('serial', c_ulong),
('error_code', c_ubyte),
('request_code', c_ubyte),
('minor_code', c_ubyte)]
class XEvent(Union):
if align:
_fields_ = [('type', c_int),
('xclient', XClientMessageEvent),
('xproperty', XPropertyEvent),
('xerror', XErrorEvent),
('pad', c_long * 24)]
else:
_fields_ = [('type', c_int),
('xclient', XClientMessageEvent),
('xproperty', XPropertyEvent),
('xerror', XErrorEvent),
('pad', c_long * 24)]
XEventP = POINTER(XEvent)
if getattr(sys, 'skype4py_setup', False):
# we get here if we're building docs; to let the module import without
# exceptions, we emulate the X11 library using a class:
class X(object):
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
pass
x11 = X()
else:
# load X11 library (Xlib)
libpath = find_library('X11')
if not libpath:
raise ImportError('Could not find X11 library')
x11 = cdll.LoadLibrary(libpath)
del libpath
# setup Xlib function prototypes
x11.XCloseDisplay.argtypes = (DisplayP,)
x11.XCloseDisplay.restype = None
x11.XCreateSimpleWindow.argtypes = (DisplayP, Window, c_int, c_int, c_uint,
c_uint, c_uint, c_ulong, c_ulong)
x11.XCreateSimpleWindow.restype = Window
x11.XDefaultRootWindow.argtypes = (DisplayP,)
x11.XDefaultRootWindow.restype = Window
x11.XDeleteProperty.argtypes = (DisplayP, Window, Atom)
x11.XDeleteProperty.restype = None
x11.XDestroyWindow.argtypes = (DisplayP, Window)
x11.XDestroyWindow.restype = None
x11.XFree.argtypes = (c_void_p,)
x11.XFree.restype = None
x11.XGetAtomName.argtypes = (DisplayP, Atom)
x11.XGetAtomName.restype = c_void_p
x11.XGetErrorText.argtypes = (DisplayP, c_int, c_char_p, c_int)
x11.XGetErrorText.restype = None
x11.XGetWindowProperty.argtypes = (DisplayP, Window, Atom, c_long, c_long, Bool,
Atom, AtomP, c_int_p, c_ulong_p, c_ulong_p, POINTER(POINTER(Window)))
x11.XGetWindowProperty.restype = c_int
x11.XInitThreads.argtypes = ()
x11.XInitThreads.restype = Status
x11.XInternAtom.argtypes = (DisplayP, c_char_p, Bool)
x11.XInternAtom.restype = Atom
x11.XNextEvent.argtypes = (DisplayP, XEventP)
x11.XNextEvent.restype = None
x11.XOpenDisplay.argtypes = (c_char_p,)
x11.XOpenDisplay.restype = DisplayP
x11.XPending.argtypes = (DisplayP,)
x11.XPending.restype = c_int
x11.XSelectInput.argtypes = (DisplayP, Window, c_long)
x11.XSelectInput.restype = None
x11.XSendEvent.argtypes = (DisplayP, Window, Bool, c_long, XEventP)
x11.XSendEvent.restype = Status
x11.XLockDisplay.argtypes = (DisplayP,)
x11.XLockDisplay.restype = None
x11.XUnlockDisplay.argtypes = (DisplayP,)
x11.XUnlockDisplay.restype = None
def threads_init(gtk=True):
"""Enables multithreading support in Xlib and PyGTK.
See the module docstring for more info.
:Parameters:
gtk : bool
May be set to False to skip the PyGTK module.
"""
# enable X11 multithreading
x11.XInitThreads()
if gtk:
from gtk.gdk import threads_init
threads_init()
class SkypeAPI(SkypeAPIBase):
def __init__(self, opts):
self.logger = logging.getLogger('Skype4Py.api.posix_x11.SkypeAPI')
SkypeAPIBase.__init__(self)
finalize_opts(opts)
# initialize threads if not done already by the user
threads_init(gtk=False)
# init Xlib display
self.disp = x11.XOpenDisplay(None)
if not self.disp:
raise SkypeAPIError('Could not open XDisplay')
self.win_root = x11.XDefaultRootWindow(self.disp)
self.win_self = x11.XCreateSimpleWindow(self.disp, self.win_root,
100, 100, 100, 100, 1, 0, 0)
x11.XSelectInput(self.disp, self.win_root, PropertyChangeMask)
self.win_skype = self.get_skype()
ctrl = 'SKYPECONTROLAPI_MESSAGE'
self.atom_msg = x11.XInternAtom(self.disp, ctrl, False)
self.atom_msg_begin = x11.XInternAtom(self.disp, ctrl + '_BEGIN', False)
self.loop_event = threading.Event()
self.loop_timeout = 0.0001
self.loop_break = False
def __del__(self):
if x11:
if hasattr(self, 'disp'):
if hasattr(self, 'win_self'):
x11.XDestroyWindow(self.disp, self.win_self)
x11.XCloseDisplay(self.disp)
def run(self):
self.logger.info('thread started')
# main loop
event = XEvent()
data = ''
while not self.loop_break and x11:
while x11.XPending(self.disp):
self.loop_timeout = 0.0001
x11.XNextEvent(self.disp, byref(event))
# events we get here are already prefiltered by the predicate function
if event.type == ClientMessage:
if event.xclient.format == 8:
if event.xclient.message_type == self.atom_msg_begin:
data = str(event.xclient.data)
elif event.xclient.message_type == self.atom_msg:
if data != '':
data += str(event.xclient.data)
else:
self.logger.warning('Middle of Skype X11 message received with no beginning!')
else:
continue
if len(event.xclient.data) != 20 and data:
self.notify(data.decode('utf-8'))
data = ''
elif event.type == PropertyNotify:
namep = x11.XGetAtomName(self.disp, event.xproperty.atom)
is_inst = (c_char_p(namep).value == '_SKYPE_INSTANCE')
x11.XFree(namep)
if is_inst:
if event.xproperty.state == PropertyNewValue:
self.win_skype = self.get_skype()
# changing attachment status can cause an event handler to be fired, in
# turn it could try to call Attach() and doing this immediately seems to
# confuse Skype (command '#0 NAME xxx' returns '#0 CONNSTATUS OFFLINE' :D);
# to fix this, we give Skype some time to initialize itself
time.sleep(1.0)
self.set_attachment_status(apiAttachAvailable)
elif event.xproperty.state == PropertyDelete:
self.win_skype = None
self.set_attachment_status(apiAttachNotAvailable)
self.loop_event.wait(self.loop_timeout)
if self.loop_event.isSet():
self.loop_timeout = 0.0001
elif self.loop_timeout < 1.0:
self.loop_timeout *= 2
self.loop_event.clear()
self.logger.info('thread finished')
def get_skype(self):
"""Returns Skype window ID or None if Skype not running."""
skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
if not skype_inst:
return
type_ret = Atom()
format_ret = c_int()
nitems_ret = c_ulong()
bytes_after_ret = c_ulong()
winp = pointer(Window())
fail = x11.XGetWindowProperty(self.disp, self.win_root, skype_inst,
0, 1, False, 33, byref(type_ret), byref(format_ret),
byref(nitems_ret), byref(bytes_after_ret), byref(winp))
if not fail and format_ret.value == 32 and nitems_ret.value == 1:
return winp.contents.value
def close(self):
self.loop_break = True
self.loop_event.set()
while self.isAlive():
time.sleep(0.01)
SkypeAPIBase.close(self)
def set_friendly_name(self, friendly_name):
SkypeAPIBase.set_friendly_name(self, friendly_name)
if self.attachment_status == apiAttachSuccess:
# reattach with the new name
self.set_attachment_status(apiAttachUnknown)
self.attach()
def attach(self, timeout, wait=True):
if self.attachment_status == apiAttachSuccess:
return
self.acquire()
try:
if not self.isAlive():
try:
self.start()
except AssertionError:
raise SkypeAPIError('Skype API closed')
try:
self.wait = True
t = threading.Timer(timeout2float(timeout), lambda: setattr(self, 'wait', False))
if wait:
t.start()
while self.wait:
self.win_skype = self.get_skype()
if self.win_skype is not None:
break
else:
time.sleep(1.0)
else:
raise SkypeAPIError('Skype attach timeout')
finally:
t.cancel()
command = Command('NAME %s' % self.friendly_name, '', True, timeout)
self.release()
try:
self.send_command(command, True)
finally:
self.acquire()
if command.Reply != 'OK':
self.win_skype = None
self.set_attachment_status(apiAttachRefused)
return
self.set_attachment_status(apiAttachSuccess)
finally:
self.release()
command = Command('PROTOCOL %s' % self.protocol, Blocking=True)
self.send_command(command, True)
self.protocol = int(command.Reply.rsplit(None, 1)[-1])
def is_running(self):
return (self.get_skype() is not None)
def startup(self, minimized, nosplash):
# options are not supported as of Skype 1.4 Beta for Linux
if not self.is_running():
if os.fork() == 0: # we're the child
os.setsid()
os.execlp('skype', 'skype')  # execlp needs argv[0] in addition to the file name
def shutdown(self):
from signal import SIGINT
fh = os.popen('ps -o %p --no-heading -C skype')
pid = fh.readline().strip()
fh.close()
if pid:
os.kill(int(pid), SIGINT)
# Skype sometimes doesn't delete the '_SKYPE_INSTANCE' property
skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
if skype_inst:
x11.XDeleteProperty(self.disp, self.win_root, skype_inst)
self.win_skype = None
self.set_attachment_status(apiAttachNotAvailable)
def send_command(self, command, force=False):
if self.attachment_status != apiAttachSuccess and not force:
self.attach(command.Timeout)
self.push_command(command)
self.notifier.sending_command(command)
cmd = u'#%d %s' % (command.Id, command.Command)
self.logger.debug('sending %s', repr(cmd))
if command.Blocking:
command._event = bevent = threading.Event()
else:
command._timer = timer = threading.Timer(command.timeout2float(), self.pop_command, (command.Id,))
event = XEvent()
event.xclient.type = ClientMessage
event.xclient.display = self.disp
event.xclient.window = self.win_self
event.xclient.message_type = self.atom_msg_begin
event.xclient.format = 8
cmd = cmd.encode('utf-8') + '\x00'
for i in xrange(0, len(cmd), 20):
event.xclient.data = cmd[i:i + 20]
x11.XSendEvent(self.disp, self.win_skype, False, 0, byref(event))
event.xclient.message_type = self.atom_msg
self.loop_event.set()
if command.Blocking:
bevent.wait(command.timeout2float())
if not bevent.isSet():
raise SkypeAPIError('Skype command timeout')
else:
timer.start()
def notify(self, cmd):
self.logger.debug('received %s', repr(cmd))
# Called by main loop for all received Skype commands.
if cmd.startswith(u'#'):
p = cmd.find(u' ')
command = self.pop_command(int(cmd[1:p]))
if command is not None:
command.Reply = cmd[p + 1:]
if command.Blocking:
command._event.set()
else:
command._timer.cancel()
self.notifier.reply_received(command)
else:
self.notifier.notification_received(cmd[p + 1:])
else:
self.notifier.notification_received(cmd)
|
jpablobr/emacs.d
|
vendor/misc/emacs-skype/build/Skype4Py/Skype4Py/api/posix_x11.py
|
Python
|
gpl-3.0
| 17,077
|
#!/usr/bin/python
import pickle as pkl
import sys
def main(pose_id, file_with_list_of_poses):
'''Takes the pickle file that we have already stored with a dictionary of poses in it.
Then it deletes a line in the ragdoll_model2_plugin.cc and replaces the line with a
line taken from the dictionary corresponding to the POSE_ID. This line is just a C++ declaration
of the array from which the initial pose of the sleeping human is defined. By changing this line in
the plugin, we are changing the initial pose in which the model starts.
Of course, the shell script start_autobed_training.sh will then compile the above .cc file before it
uses it in the gazebo simulation.
REMEMBER TO REMOVE THE OLD FILE AND RENAME THE newly created model3_plugin.cc using the shell script.
'''
poses_dict = pkl.load(open(file_with_list_of_poses, "rb"))
plugin_file = open('/home/yashc/fuerte_workspace/sandbox/git/hrl_autobed_dev/hrl_gazebo_autobed/sdf/new_ragdoll/gazebo_model_plugin/ros_ragdoll_model2_plugin.cc', "r")
sdf_file = open('/home/yashc/fuerte_workspace/sandbox/git/hrl_autobed_dev/hrl_gazebo_autobed/sdf/new_ragdoll/correct_ragdoll_original.sdf')
plugin_lines = plugin_file.readlines()
sdf_lines = sdf_file.readlines()
plugin_file.close()
sdf_file.close()
plugin_file_to_write = open('/home/yashc/fuerte_workspace/sandbox/git/hrl_autobed_dev/hrl_gazebo_autobed/sdf/new_ragdoll/gazebo_model_plugin/ros_ragdoll_model3_plugin.cc', "wb")
sdf_file_to_write = open('/home/yashc/fuerte_workspace/sandbox/git/hrl_autobed_dev/hrl_gazebo_autobed/sdf/new_ragdoll/correct_ragdoll_temporary.sdf', "wb")
plugin_string_to_search = "float joint_angles"
sdf_string_to_search = "<pose>"
count = 0
for line in plugin_lines:
if not plugin_string_to_search in line:
plugin_file_to_write.write(line)
else:
plugin_file_to_write.write(poses_dict[pose_id][1]+'\n')
plugin_file_to_write.close()
for line in sdf_lines:
if (not sdf_string_to_search in line) or (count >= 1):
sdf_file_to_write.write(line)
else:
sdf_file_to_write.write(poses_dict[pose_id][0]+'\n')
count = count + 1
sdf_file_to_write.close()
if __name__ == "__main__":
main(int(sys.argv[1]), sys.argv[2])
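# Example invocation (the pose id and pickle filename below are placeholders):
#   python model_plugin_modifier.py 3 poses_list.pkl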
|
gt-ros-pkg/hrl_autobed_dev
|
autobed_pose_estimator/src/model_plugin_modifier.py
|
Python
|
mit
| 2,355
|
# -*- coding: utf-8 -*-
import logging
import os
_logger = logging.getLogger(__name__)
try:
import MySQLdb
import MySQLdb.cursors
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base
try:
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.mapper import *
import re
import time
import datetime as DT
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import csv
import glob
from openerp.osv.fields import sanitize_binary_value
class fixdate_custom(mapper):
"""
convert '12/31/10 13:26:25' to '2010-12-31' (assumes a m/d/yy date with a 20xx year)
"""
def __init__(self, field_name):
self.field_name = field_name
def __call__(self, external_values):
s = external_values.get(self.field_name)
if not s:
return ''
m,d,y = str(s).split(' ')[0].split('/')
return '20%s-%s-%s' % (y,m,d)
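# For example (illustrative only): an external value of '12/31/10 13:26:25'
# is converted to '2010-12-31'.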
class image(mapper):
def __init__(self, val):
self.val = val
def __call__(self, external_values):
val = external_values.get(self.val)
files = glob.glob('/home/tmp/thumbs/%s_*' % val)
max_file = None
max_size = 0
for f in files:
size = os.path.getsize(f)
if size > 93000:
continue
if size < max_size:
continue
max_size = size
max_file = f
if not max_file:
return None
with open(max_file, 'r') as f:
b = f.read()
val = sanitize_binary_value(b)
return val
class import_custom(import_base):
TABLE_PROSPECTS = 'prospects_burda'
TABLE_PROSPECTS_TAG = TABLE_PROSPECTS + '_tag'
TABLE_PRODUCT = 'products'
TABLE_PRODUCT_CATEGORY = 'categories'
COL_LINE_NUM = 'line_num'
def initialize(self):
self.csv_files = self.context.get('csv_files')
self.import_options.update({'separator':',',
#'quoting':''
})
def get_data(self, table):
file_name = filter(lambda f: f.endswith('/%s.csv' % table), self.csv_files)
if file_name:
_logger.info('read file "%s"' % ( '%s.csv' % table))
file_name = file_name[0]
else:
_logger.info('file not found %s' % ( '%s.csv' % table))
return []
with open(file_name, 'rb') as csvfile:
fixed_file = StringIO(csvfile.read().replace('\r\n', '\n'))
reader = csv.DictReader(fixed_file,
delimiter = self.import_options.get('separator'),
#quotechar = self.import_options.get('quoting'),
)
res = list(reader)
for line_num, line in enumerate(res):
line[self.COL_LINE_NUM] = str(line_num)
return res
def get_mapping(self):
return [
self.get_mapping_partners(),
self.get_mapping_product_categories(),
self.get_mapping_products(),
]
def get_table(self, table):
def f():
t = DataFrame(self.get_data(table))
#t = t[:10] # for debug
return t
return f
def get_hook_tag(self, field_name):
def f(external_values):
res = []
value = external_values.get(field_name)
value = value or ''
if not isinstance(value, basestring):
value = str(value)
for v in value.split(','):
#v = do_clean_sugar(v)
if v:
res.append({field_name:v})
return res
return f
def tag(self, model, xml_id_prefix, field_name):
parent = xml_id_prefix + field_name
return {'model':model,
'hook':self.get_hook_tag(field_name),
'fields': {
'id': xml_id(parent, field_name),
'name': field_name,
#'parent_id/id':const('sugarcrm_migration.'+parent),
}
}
def get_mapping_partners(self):
return {
'name': self.TABLE_PROSPECTS,
'table': self.get_table(self.TABLE_PROSPECTS),
'dependencies' : [],
'models':[
self.tag('res.partner.category', self.TABLE_PROSPECTS_TAG, 'Tag'),
self.tag('res.partner.category', self.TABLE_PROSPECTS_TAG, 'Tags'),
self.tag('res.partner.category', self.TABLE_PROSPECTS_TAG, 'TypeName'),
{'model' : 'res.partner',
'fields': {
'id': xml_id(self.TABLE_PROSPECTS, 'External ID'),
'name': 'Name',
'lang': const('es_ES'),
'is_company': map_val('Is a Company', {'True':'1', 'False':'0'}, default='0'),
'customer': const('1'),
'supplier': const('0'),
'category_id/id': tags_from_fields(self.TABLE_PROSPECTS_TAG, ['Tag','Tags', 'TypeName']),
'street': 'Street',
'street2': 'Street2',
'zip': 'Zip',
'city': 'City',
'phone': 'Phone',
'mobile': 'Mobile',
'email': 'Email',
'country_id/.id': country_by_name('Country'),
'date': fixdate_custom('CreationDate'),
'comment': ppconcat('Subscription'),
}
},
{'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('ContactLastname', 'ContactEmail'),
'fields': {
'id': xml_id(self.TABLE_PROSPECTS+'_child', 'External ID'),
'parent_id/id': xml_id(self.TABLE_PROSPECTS, 'External ID'),
'name': concat('ContactTitle', 'ContactFirstname', 'ContactLastname', delimiter=' '),
'customer': const('1'),
'supplier': const('0'),
'function': 'ContactJobtitle',
'phone': 'ContactPhone',
'fax': 'ContactFax',
'email': 'ContactEmail',
'lang': const('es_ES'),
'comment': ppconcat('ContactGender'),
}
}
]
}
def get_mapping_product_categories(self):
return {
'name': self.TABLE_PRODUCT_CATEGORY,
'table': self.get_table(self.TABLE_PRODUCT_CATEGORY),
'dependencies' : [],
'models':[
{'model' : 'product.public.category',
'fields': {
'id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'id'),
'name': 'label',
},
},
{'model' : 'product.public.category',
'hook': lambda vals: vals.get('parent_id')!='NULL' and vals or None,
'fields': {
'id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'id'),
'name': 'label',
'parent_id/id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'parent_id'),
},
},
]
}
def table_product(self):
t = DataFrame(self.get_data('ecom_items'))
t = merge(t,
DataFrame(self.get_data('ecom_items_ref')),
how='left',
left_on='ID',
suffixes=('', '_ref'),
right_on='ecom_items_id')
t = merge(t,
DataFrame(self.get_data('item_categories')),
how='left',
left_on='ID',
suffixes=('', '_categories'),
right_on='ecom_items_id')
#t = merge(t,
# DataFrame(self.get_data('thumbs')),
# how='left',
# left_on='id', # from ecom_items_ref
# suffixes=('', '_thumbs'),
# right_on='ecom_items_ref_id')
#t = t[:500] # for debug
return t
def get_mapping_products(self):
return {
'name': self.TABLE_PRODUCT,
'table': self.table_product,
'dependencies' : [self.TABLE_PRODUCT_CATEGORY],
'models':[
{'model':'product.category',
'fields': {
'id': xml_id(self.TABLE_PRODUCT + '_brand', 'Brand'),
'name': 'Brand',
}
},
{'model' : 'product.product',
'split' : 1000,
'fields': {
'id': xml_id(self.TABLE_PRODUCT, 'ID'),
'categ_id/id': xml_id(self.TABLE_PRODUCT + '_brand', 'Brand'),
'name': 'Label',
'website_published': 'published',
'default_code': 'ID',
'standard_price': 'price_purchase',
'lst_price': 'price_sales',
'active': lambda record: not int(record['disabled']),
'public_categ_id/id': xml_id(self.TABLE_PRODUCT_CATEGORY, 'ecom_category_id'),
'image_medium': image('id'),
'description': ppconcat(
'color',
'weight',
'size',
'custom_code',
#'price_purchase',
'vat_code',
#'price_sales',
'stock_min',
'stock_max',
'packaging',
'packaging_pro',
'packaging_public',
'tags',
'eco_tax',
'EAN_code',
'disabled',
'body'
),
},
}
]
}
|
litnimax/addons-yelizariev
|
import_custom/import_custom.py
|
Python
|
lgpl-3.0
| 10,226
|
# Update all genes from Entrez Gene that
# are relevant to the organism we passed in via the
# command line
import Config
import sys, string, argparse
import MySQLdb
import Database
import gzip
from classes import EntrezGene
# Process Command Line Input
argParser = argparse.ArgumentParser( description = 'Update all genes from Entrez Gene that are relevant to the organism id passed in via the command line.' )
argGroup = argParser.add_mutually_exclusive_group( required=True )
argGroup.add_argument( '-o', help = 'NCBI Organism ID', type=int, dest = 'organismID', action='store' )
argGroup.add_argument( '-g', dest='genes', nargs = '+', help = 'An Entrez Gene ID List to Update', action='store' )
inputArgs = vars( argParser.parse_args( ) )
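# Example invocations (the organism and gene ids below are placeholders):
#   python EG_updateGenes.py -o 9606
#   python EG_updateGenes.py -g 7157 672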
isOrganism = False
isGene = False
if None != inputArgs['organismID'] :
isOrganism = True
elif None != inputArgs['genes'] :
isGene = True
with Database.db as cursor :
entrezGene = EntrezGene.EntrezGene( Database.db, cursor )
organismList = entrezGene.fetchEntrezGeneOrganismMapping( )
existingEntrezGeneIDs = { }
organismID = 0
if isOrganism :
# argparse stores -o as a plain int, so use the value directly
if inputArgs['organismID'] in organismList :
organismID = organismList[inputArgs['organismID']]
existingEntrezGeneIDs = entrezGene.fetchExistingEntrezGeneIDsByOrganism( organismID )
cursor.execute( "UPDATE " + Config.DB_NAME + ".genes SET gene_status='inactive' WHERE organism_id=%s", [organismID] )
Database.db.commit( )
elif isGene :
for gene in inputArgs['genes'] :
geneID = entrezGene.geneExists( gene )
if geneID :
existingEntrezGeneIDs[gene] = geneID
cursor.execute( "UPDATE " + Config.DB_NAME + ".genes SET gene_status='inactive' WHERE gene_id=%s", [geneID] )
Database.db.commit( )
insertCount = 0
with gzip.open( Config.EG_GENEINFO, 'r' ) as file :
for line in file.readlines( ) :
line = line.strip( )
# Ignore Header Line
if "#" == line[0] :
continue
splitLine = line.split( "\t" )
entrezGeneTaxID = int(splitLine[0].strip( ))
sourceID = splitLine[1].strip( )
officialSymbol = splitLine[2].strip( )
geneType = splitLine[9].strip( )
# Skip NEWENTRY records
if "NEWENTRY" == officialSymbol :
continue
toProcess = False
if isOrganism and inputArgs['organismID'] == entrezGeneTaxID :
toProcess = True
elif isGene and str(sourceID) in inputArgs['genes'] :
toProcess = True
if entrezGeneTaxID in organismList :
organismID = organismList[entrezGeneTaxID]
if toProcess :
if sourceID in existingEntrezGeneIDs :
# Found Gene ID already in the Database
# Update info with no problems
currentGeneID = existingEntrezGeneIDs[sourceID]
insertCount = insertCount + 1
print "UPDATING EXISTING GENE"
cursor.execute( "UPDATE " + Config.DB_NAME + ".genes SET gene_type=%s, gene_name=%s, gene_name_type=%s, gene_status='active', gene_updated=NOW( ) WHERE gene_id=%s", [geneType, officialSymbol, 'entrez-official', currentGeneID] )
else :
# Not already in Database
# insert it.
insertCount = insertCount + 1
print "INSERTING NEW GENE"
cursor.execute( "INSERT INTO " + Config.DB_NAME + ".genes VALUES( '0', %s, %s, %s, %s, %s, 'active', NOW( ), NOW( ), %s, '0' )", [officialSymbol, 'entrez-official', sourceID, geneType, organismID, 'ENTREZ'] )
if 0 == (insertCount % Config.DB_COMMIT_COUNT ) :
Database.db.commit( )
Database.db.commit( )
cursor.execute( "INSERT INTO " + Config.DB_STATS + ".update_tracker VALUES ( '0', 'EG_updateGenes', NOW( ) )" )
Database.db.commit( )
sys.exit( )
|
BioGRID/BioGRID-Annotation
|
EG_updateGenes.py
|
Python
|
mit
| 3,753
|
# coding: utf-8
import json
import unittest
from app import app
from app.models.properties import Property
class ResourcesTestCase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.client = app.test_client()
Property.drop_collection()
self.propertie_1 = Property(
title="Imóvel código 665, com 1 quarto e 1 banheiro",
price=540000,
description="Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
x=667,
y=556,
beds=1,
baths=1,
squareMeters=42
).save()
def test_delete_property(self):
response = self.client.delete('/properties/{id}'.format(id=self.propertie_1.id))
self.assertEqual(response.status_code, 204)
def test_delete_inexistent_property(self):
response = self.client.delete('/properties/{id}'.format(id='7867asdasd3434'))
content = json.loads(response.data)
self.assertEqual(response.status_code, 404)
self.assertDictEqual(content,
{u'message': u'property with id '
u'7867asdasd3434 does not exist.'})
def test_delete_property_with_missing_params(self):
response = self.client.delete('/properties/')
content = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertDictEqual(content,
{u'message': u'You must provide an id'})
def test_post_properties_invalid_square_meters(self):
data = {
"x": 500,
"y": 800,
"title": u"Imóvel código 1, com 5 quartos e 4 banheiros",
"price": 1250000,
"description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"beds": 3,
"baths": 2,
"squareMeters": 250,
}
response = self.client.post("/properties", data=data)
content = json.loads(response.data)
self.assertEqual(response.status_code, 422)
self.assertDictEqual(content,
{u'message': u'squareMeters cannot be greater than 240 or lower than 20.'})
def test_post_properties_invalid_number_of_bathrooms(self):
data = {
"x": 500,
"y": 800,
"title": u"Imóvel código 1, com 5 quartos e 4 banheiros",
"price": 1250000,
"description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"beds": 3,
"baths": 0,
"squareMeters": 210,
}
response = self.client.post("/properties", data=data)
content = json.loads(response.data)
self.assertEqual(response.status_code, 422)
self.assertDictEqual(content,
{u'message': u'The number of baths cannot be lower than 1 or greater than 4'})
def test_post_properties_invalid_number_of_beds(self):
data = {
"x": 500,
"y": 800,
"title": u"Imóvel código 1, com 5 quartos e 4 banheiros",
"price": 1250000,
"description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"beds": 10,
"baths": 3,
"squareMeters": 210,
}
response = self.client.post("/properties", data=data)
content = json.loads(response.data)
self.assertEqual(response.status_code, 422)
self.assertDictEqual(content,
{u'message': u'The number of beds cannot be lower than 1 or greater than 5'})
def test_post_properties_out_of_bounds(self):
data = {
"x": 1500,
"y": 1200,
"title": u"Imóvel código 1, com 5 quartos e 4 banheiros",
"price": 1250000,
"description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"beds": 4,
"baths": 3,
"squareMeters": 210,
}
response = self.client.post("/properties", data=data)
content = json.loads(response.data)
self.assertEqual(response.status_code, 422)
self.assertDictEqual(content,
{u'message':
u'x value cannot be lower than zero'
u' or greater than 1400.x is 1500'})
def test_post_properties(self):
data = {
"x": 222,
"y": 444,
"title": u"Imóvel código 1, com 5 quartos e 4 banheiros",
"price": 1250000,
"description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"beds": 4,
"baths": 3,
"squareMeters": 210,
}
response = self.client.post("/properties", data=data)
self.assertEqual(response.status_code, 201)
def test_post_insufficient_params(self):
data = {
"x": 222,
"y": 444,
"title": u"Imóvel código 1, com 5 quartos e 4 banheiros",
"price": 1250000,
"description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"beds": 4,
"baths": 3,
}
response = self.client.post("/properties", data=data)
content = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertDictEqual(content, {
u'message': {
u'squareMeters': u'Missing required parameter in the JSON body'
u' or the post body or the query string'}})
def test_get_property_by_id(self):
response = self.client.get(
"/properties/{id}".format(id=self.propertie_1.id))
content = json.loads(response.data)
property_ = self.propertie_1.to_dict()
property_['provinces'] = ['Ruja']
self.assertEqual(response.status_code, 200)
self.assertDictEqual(property_, content)
def test_get_property_with_insufficient_params(self):
response = self.client.get("/properties")
content = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertDictEqual(content, {
u'message': u"You must provide an id or a query string with 'ax', 'bx', 'ay', 'by"})
def test_get_property_invalid_id(self):
response = self.client.get(
"/properties/{id}".format(id="4f4381f4e779897a2c000009"))
content = json.loads(response.data)
self.assertEqual(response.status_code, 404)
self.assertDictEqual(content,
{u'message': u'property with id 4f4381f4e779897a2c000009 not found.'})
def test_get_properties_by_area(self):
ax, bx = (700, 600)
ay, by = (400, 700)
response = self.client.get("/properties?ax={ax}&bx={bx}&ay={ay}&by={by}".format(
ax=ax,
bx=bx,
ay=ay,
by=by
))
content = json.loads(response.data)
property_ = self.propertie_1.to_dict()
property_['provinces'] = [u'Ruja']
self.assertEqual(response.status_code, 200)
self.assertDictEqual(content, {
'foundProperties': 1,
'properties': [property_]
})
|
IuryAlves/code-challenge
|
tests/resource_tests.py
|
Python
|
mit
| 7,327
|
from semeval import helper as helper
from semeval.lstms.LSTMModel import LSTMModel
import numpy
from keras.models import Sequential
from keras.layers import Dense, Activation, Bidirectional, LSTM, Dropout
from keras.callbacks import EarlyStopping
class EarlyStoppingLSTM(LSTMModel):
'''Model that can train an LSTM and apply the trained model to unseen
data. Inherits from LSTMModel.
Instance Arguments:
self._word2vec_model - gensim.models.Word2Vec required as an argument to __init__
self._max_length = 0
self._model = None
public methods:
fit - trains a Bi-directional LSTM with dropout and early stopping on
the texts and sentiment values given.
test - Using the trained model saved at self._model will return a list of
sentiment values given the texts in the argument of the method.
'''
def __init__(self, word2vec_model):
super().__init__(word2vec_model)
def fit(self, train_texts, sentiment_values):
'''Given a list of Strings and a list of floats (sentiments) or numpy
array of floats. It will return a trained LSTM model and `save` the model to
self._model for future use using self.test(texts).
The model converts the list of strings into list of numpy matrixs
which has the following dimensions:
length of the longest train text broken down into tokens
by
the vector size of the word2vec model given in the constructor
e.g. 21, 300 if the word2vec model vector size is 300 and the length of
the longest train text in tokens is 21.
For more details on the layers, read the source or, after training,
visualise the model using the visualise_model function.
'''
super().fit()
max_length = self._set_max_length(train_texts)
vector_length = self._word2vec_model.vector_size
train_vectors = self._text2vector(train_texts)
model = Sequential()
model.add(Dropout(0.5, input_shape=(max_length, vector_length)))
# Output of this layer is of max_length by max_length * 2 dimension
# instead of max_length, vector_length
model.add(Bidirectional(LSTM(max_length, activation='softsign',
return_sequences=True)))
model.add(Dropout(0.5))
model.add(Bidirectional(LSTM(max_length, activation='softsign')))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('linear'))
model.compile(loss='mse',
optimizer='rmsprop',
metrics=['cosine_proximity'],
clipvalue=5)
early_stopping = EarlyStopping(monitor='val_loss', patience=10)
model.fit(train_vectors, sentiment_values, validation_split=0.1,
callbacks=[early_stopping] , nb_epoch=100)
return self._set_model(model)
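# A minimal usage sketch (hypothetical data; `w2v` is a trained
# gensim.models.Word2Vec instance):
#
#   model = EarlyStoppingLSTM(w2v)
#   model.fit(train_texts, train_sentiment_values)
#   predictions = model.test(test_texts)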
|
apmoore1/semeval
|
lstms/EarlyStoppingLSTM.py
|
Python
|
gpl-3.0
| 2,914
|
import random
#Differential evolution optimizer based on Scipy implementation:
#http://python-scipy.sourcearchive.com/documentation/0.6.0/classscipy_1_1sandbox_1_1rkern_1_1diffev_1_1DiffEvolver.html
def argmin(x):
if len(x) < 2:
return 0
bestv,idx = x[0],0
for e,i in enumerate(x[1:],1):
if i<bestv:
bestv = i
idx = e
return idx
class DiffEvolver(object):
"""Minimize a function using differential evolution.
Constructors
------------
DiffEvolver(func, pop0, args=(), crossover_rate=0.5, scale=None,
strategy=('rand', 2, 'bin'), eps=1e-6)
func -- function to minimize
pop0 -- sequence of initial vectors
args -- additional arguments to apply to func
crossover_rate -- crossover probability [0..1] usually 0.5 or so
scale -- scaling factor to apply to differences [0..1] usually > 0.5
if None, then calculated from pop0 using a heuristic
strategy -- tuple specifying the differencing/crossover strategy
The first element is one of 'rand', 'best', 'rand-to-best' to specify
how to obtain an initial trial vector.
The second element is either 1 or 2 (or only 1 for 'rand-to-best') to
specify the number of difference vectors to add to the initial trial.
The third element is (currently) 'bin' to specify binomial crossover.
eps -- if the maximum and minimum function values of a given generation are
within eps of each other, convergence has been achieved.
DiffEvolver.frombounds(func, lbound, ubound, npop, crossover_rate=0.5,
scale=None, strategy=('rand', 2, 'bin'), eps=1e-6)
Randomly initialize the population within given rectangular bounds.
lbound -- lower bound vector
ubound -- upper bound vector
npop -- size of population
Public Methods
--------------
solve(newgens=100)
Run the minimizer for newgens more generations. Return the best parameter
vector from the whole run.
Public Members
--------------
best_value -- lowest function value in the history
best_vector -- minimizing vector
best_val_history -- list of best_value's for each generation
best_vec_history -- list of best_vector's for each generation
population -- current population
pop_values -- respective function values for each of the current population
generations -- number of generations already computed
func, args, crossover_rate, scale, strategy, eps -- from constructor
"""
def __init__(self, func, pop0, args=(), crossover_rate=0.5, scale=None,
strategy=('rand', 2, 'bin'), eps=1e-6, lbound=None, ubound=None):
self.func = func
self.population = pop0
self.npop, self.ndim = len(self.population),len(self.population[0])
self.args = args
self.crossover_rate = crossover_rate
self.strategy = strategy
self.eps = eps
self.lbound = lbound
self.ubound = ubound
self.bounds = lbound!=None and ubound!=None
self.pop_values = [self.func(m, *args) for m in self.population]
bestidx = argmin(self.pop_values)
self.best_vector = self.population[bestidx]
self.best_value = self.pop_values[bestidx]
if scale is None:
self.scale = self.calculate_scale()
else:
self.scale = scale
self.generations = 0
self.best_val_history = []
self.best_vec_history = []
self.jump_table = {
('rand', 1, 'bin'): (self.choose_rand, self.diff1, self.bin_crossover),
('rand', 2, 'bin'): (self.choose_rand, self.diff2, self.bin_crossover),
('best', 1, 'bin'): (self.choose_best, self.diff1, self.bin_crossover),
('best', 2, 'bin'): (self.choose_best, self.diff2, self.bin_crossover),
('rand-to-best', 1, 'bin'):
(self.choose_rand_to_best, self.diff1, self.bin_crossover),
}
def clear(self):
self.best_val_history = []
self.best_vec_history = []
self.generations = 0
self.pop_values = [self.func(m, *self.args) for m in self.population]
def frombounds(cls, func, lbound, ubound, npop, crossover_rate=0.5,
scale=None, x0=None, strategy=('rand', 2, 'bin'), eps=1e-6):
if x0==None:
pop0 = [[random.random()*(ubound[i]-lbound[i]) + lbound[i] for i in xrange(len(lbound))] for c in xrange(npop)]
else:
pop0 = [0]*npop
for e,x in enumerate(x0):
if len(x)!=len(lbound):
raise ValueError("Dimension of x0[{}] is incorrect".format(e))
if any(not lbound[i]<=x[i]<=ubound[i] for i in xrange(len(lbound))):
raise ValueError("x0[{}] not inside the bounds.".format(e))
for i in xrange(e,npop,len(x0)):
pop0[i] = x
delta = 0.3
pop0 = [[delta*(random.random()*(ubound[i]-lbound[i]) + lbound[i])+p[i] for i in xrange(len(lbound))] for p in pop0]
pop0 = [[lbound[i] if p[i]<lbound[i] else (ubound[i] if p[i]>ubound[i] else p[i]) for i in xrange(len(lbound))] for p in pop0]
#Make sure to include x0
pop0[:len(x0)] = x0
return cls(func, pop0, crossover_rate=crossover_rate, scale=scale,
strategy=strategy, eps=eps, lbound=lbound, ubound=ubound)
frombounds = classmethod(frombounds)
def calculate_scale(self):
rat = abs(max(self.pop_values)/self.best_value)
rat = min(rat, 1./rat)
return max(0.3, 1.-rat)
def bin_crossover(self, oldgene, newgene):
new = oldgene[:]
for i in xrange(len(oldgene)):
if random.random() < self.crossover_rate:
new[i] = newgene[i]
return new
def select_samples(self, candidate, nsamples):
possibilities = range(self.npop)
possibilities.remove(candidate)
random.shuffle(possibilities)
return possibilities[:nsamples]
def diff1(self, candidate):
i1, i2 = self.select_samples(candidate, 2)
y = [(self.population[i1][c] - self.population[i2][c]) for c in xrange(self.ndim)]
y = [self.scale*i for i in y]
return y
def diff2(self, candidate):
i1, i2, i3, i4 = self.select_samples(candidate, 4)
y = ([(self.population[i1][c] - self.population[i2][c]+self.population[i3][c] - self.population[i4][c]) for c in xrange(self.ndim)])
y = [self.scale*i for i in y]
return y
def choose_best(self, candidate):
return self.best_vector
def choose_rand(self, candidate):
i = self.select_samples(candidate, 1)[0]
return self.population[i]
    def choose_rand_to_best(self, candidate):
        # blend the current candidate toward the best vector element-wise;
        # a scalar times a plain Python list (as built by frombounds) would
        # raise a TypeError, so interpolate per coordinate instead
        return [(1 - self.scale)*self.population[candidate][i] +
                self.scale*self.best_vector[i]
                for i in xrange(self.ndim)]
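    # get_trial implements one differential-evolution step: pick a base vector
    # (chooser), add a scaled difference of random members (differ), binomially
    # cross the result with the current candidate (crosser), and repair any
    # out-of-bounds genes by hugging (20%) or mirroring (80%) the bounds.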
def get_trial(self, candidate):
chooser, differ, crosser = self.jump_table[self.strategy]
chosen = chooser(candidate)
diffed = differ(candidate)
new = [chosen[i] + diffed[i] for i in xrange(self.ndim)]
trial = crosser(self.population[candidate],new)
if self.bounds:
if random.random() < 0.2:
trial = self.hug_bounds(trial)
else:
trial = self.mirror_bounds(trial)
return trial
    def mirror_bounds(self, trial):
        """Mirror out-of-bounds values back into the bounded region, or draw a
        new random coordinate if the mirrored value is still out of bounds."""
for i in xrange(self.ndim):
if trial[i]<self.lbound[i]:
trial[i] = 2*self.lbound[i]-trial[i]
if trial[i]<self.lbound[i]:
trial[i] = random.random()*(self.ubound[i]-self.lbound[i]) + self.lbound[i]
elif trial[i]>self.ubound[i]:
trial[i] = 2*self.ubound[i]-trial[i]
if trial[i]>self.ubound[i]:
trial[i] = random.random()*(self.ubound[i]-self.lbound[i]) + self.lbound[i]
return trial
    def hug_bounds(self, trial):
        """Clip out-of-bounds values to the nearest bound."""
for i in xrange(self.ndim):
if trial[i]<self.lbound[i]:
trial[i] = self.lbound[i]
elif trial[i]>self.ubound[i]:
trial[i] = self.ubound[i]
return trial
def converged(self):
return max(self.pop_values) - min(self.pop_values) <= self.eps
def solve(self, newgens=100):
"""Run for newgens more generations.
Return best parameter vector from the entire run.
"""
for gen in xrange(self.generations+1, self.generations+newgens+1):
for candidate in range(self.npop):
trial = self.get_trial(candidate)
trial_value = self.func(trial, *self.args)
if trial_value < self.pop_values[candidate]:
self.population[candidate] = trial
self.pop_values[candidate] = trial_value
if trial_value < self.best_value:
self.best_vector = trial
self.best_value = trial_value
self.best_val_history.append(self.best_value)
self.best_vec_history.append(self.best_vector)
if self.converged():
break
self.generations = gen
return self.best_vector
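# A minimal usage sketch (assumes the enclosing class is named DiffEvolver, as
# in the classic differential-evolution recipe this module follows; the sphere
# function and the bounds below are illustrative only):
if __name__ == '__main__':
    def sphere(x):
        return sum(xi*xi for xi in x)

    solver = DiffEvolver.frombounds(sphere, lbound=[-5.0]*3, ubound=[5.0]*3,
                                    npop=30, strategy=('rand', 2, 'bin'),
                                    eps=1e-8)
    best = solver.solve(newgens=200)
    print(best)
    print(solver.best_value)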
|
Ttl/evolutionary-circuits
|
evolutionary/optimization/diff_evolve.py
|
Python
|
mit
| 9,473
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pika
from random import randint
from Queue import (
Queue,
Empty
)
from configman import (
Namespace,
class_converter
)
from socorro.external.rabbitmq.connection_context import (
ConnectionContextPooled
)
from socorro.external.crashstorage_base import (
CrashStorageBase,
)
#==============================================================================
class RabbitMQCrashStorage(CrashStorageBase):
"""This class is an implementation of a Socorro Crash Storage system.
It is used as a crash queing methanism for raw crashes. It implements
the save_raw_crash method as a queue submission function, and the
new_crashes generator as a queue consumption function. Please note: as
it only queues the crash_id and not the whole raw crash, it is not suitable
to actually save a crash. It is a very lossy container. This class
should be used in conjuction with a more persistant storage mechanism.
The implementations CrashStorage classes can use arbitrarly high or low
level semantics to talk to their underlying resource. In the RabbitMQ,
implementation, queing through the 'save_raw_crash' method is given full
transactional semantics using the TransactorExecutor classes. The
'new_crashes' generator has a lower level relationship with the
underlying connection object"""
required_config = Namespace()
required_config.add_option(
'rabbitmq_class',
default=ConnectionContextPooled, # we choose a pooled connection
# because we need thread safe
# connection behaviors
doc='the class responsible for connecting to RabbitMQ',
reference_value_from='resource.rabbitmq',
)
required_config.add_option(
'transaction_executor_class',
default="socorro.database.transaction_executor."
"TransactionExecutorWithInfiniteBackoff",
doc='a class that will manage transactions',
from_string_converter=class_converter,
reference_value_from='resource.rabbitmq',
)
required_config.add_option(
'routing_key',
default='socorro.normal',
doc='the name of the queue to recieve crashes',
reference_value_from='resource.rabbitmq',
)
required_config.add_option(
'filter_on_legacy_processing',
default=True,
doc='toggle for using or ignoring the throttling flag',
reference_value_from='resource.rabbitmq',
)
required_config.add_option(
'throttle',
default=100,
        doc='the percentage of crashes that will be queued (throttling)',
reference_value_from='resource.rabbitmq',
)
#--------------------------------------------------------------------------
def __init__(self, config, quit_check_callback=None):
super(RabbitMQCrashStorage, self).__init__(
config,
quit_check_callback=quit_check_callback
)
self.config = config
# Note: this may continue to grow if we aren't acking certain UUIDs.
# We should find a way to time out UUIDs after a certain time.
self.acknowledgement_token_cache = {}
self.acknowledgment_queue = Queue()
self.rabbitmq = config.rabbitmq_class(config)
self.transaction = config.transaction_executor_class(
config,
self.rabbitmq,
quit_check_callback=quit_check_callback
)
# cache this object so we don't have to remake it for every transaction
self._basic_properties = pika.BasicProperties(
delivery_mode=2, # make message persistent
)
if config.throttle == 100:
self.dont_queue_this_crash = lambda: False
else:
self.dont_queue_this_crash = lambda: randint(1, 100) > config.throttle
#--------------------------------------------------------------------------
def save_raw_crash(self, raw_crash, dumps, crash_id):
if self.dont_queue_this_crash():
self.config.logger.info(
'Crash %s filtered out of RabbitMQ queue %s',
crash_id,
self.config.routing_key
)
return
try:
this_crash_should_be_queued = (
(not self.config.filter_on_legacy_processing)
or
raw_crash.legacy_processing == 0
)
except KeyError:
self.config.logger.debug(
'RabbitMQCrashStorage legacy_processing key absent in crash '
'%s', crash_id
)
return
if this_crash_should_be_queued:
self.config.logger.debug(
'RabbitMQCrashStorage saving crash %s', crash_id
)
self.transaction(self._save_raw_crash_transaction, crash_id)
else:
self.config.logger.debug(
'RabbitMQCrashStorage not saving crash %s, legacy processing '
'flag is %s', crash_id, raw_crash.legacy_processing
)
#--------------------------------------------------------------------------
def _save_raw_crash_transaction(self, connection, crash_id):
connection.channel.basic_publish(
exchange='',
routing_key=self.config.routing_key,
body=crash_id,
properties=self._basic_properties
)
#--------------------------------------------------------------------------
def _basic_get_transaction(self, conn, queue):
"""reorganize the the call to rabbitmq basic_get so that it can be
used by the transaction retry wrapper."""
things = conn.channel.basic_get(queue=queue)
return things
#--------------------------------------------------------------------------
def new_crashes(self):
"""This generator fetches crash_ids from RabbitMQ."""
# We've set up RabbitMQ to require acknowledgement of processing of a
# crash_id from this generator. It is the responsibility of the
        # consumer of the crash_id to tell this instance of the class when it has
# completed its work on the crash_id. That is done with the call to
# 'ack_crash' below. Because RabbitMQ connections are not thread safe,
# only the thread that read the crash may acknowledge it. 'ack_crash'
# queues the crash_id. The '_consume_acknowledgement_queue' function
        # is run to send acknowledgments back to RabbitMQ.
self._consume_acknowledgement_queue()
conn = self.rabbitmq.connection()
queues = [
self.rabbitmq.config.priority_queue_name,
self.rabbitmq.config.standard_queue_name,
self.rabbitmq.config.reprocessing_queue_name,
self.rabbitmq.config.priority_queue_name,
]
while True:
for queue in queues:
method_frame, header_frame, body = self.transaction(
self._basic_get_transaction,
queue=queue
)
if method_frame and self._suppress_duplicate_jobs(
body,
method_frame
):
continue
if method_frame:
break
# must consume ack queue before testing for end of iterator
# or the last job won't get ack'd
self._consume_acknowledgement_queue()
if not method_frame:
# there was nothing in the queue - leave the iterator
return
self.acknowledgement_token_cache[body] = method_frame
yield body
queues.reverse()
#--------------------------------------------------------------------------
def ack_crash(self, crash_id):
self.acknowledgment_queue.put(crash_id)
#--------------------------------------------------------------------------
def _suppress_duplicate_jobs(self, crash_id, acknowledgement_token):
"""if this crash is in the cache, then it is already in progress
and this is a duplicate. Acknowledge it, then return to True
to let the caller know to skip on to the next crash."""
if crash_id in self.acknowledgement_token_cache:
            # reject this crash - it's already being processed
self.config.logger.info(
'duplicate job: %s is already in progress',
crash_id
)
# ack this
self.transaction(
self._transaction_ack_crash,
crash_id,
acknowledgement_token
)
return True
return False
#--------------------------------------------------------------------------
def _consume_acknowledgement_queue(self):
"""The acknowledgement of the processing of each crash_id yielded
from the 'new_crashes' method must take place on the same connection
that the crash_id came from. The crash_ids are queued in the
'acknowledgment_queue'. That queue is consumed by the QueuingThread"""
try:
while True:
crash_id_to_be_acknowledged = \
self.acknowledgment_queue.get_nowait()
#self.config.logger.debug(
#'RabbitMQCrashStorage set to acknowledge %s',
#crash_id_to_be_acknowledged
#)
try:
acknowledgement_token = \
self.acknowledgement_token_cache[
crash_id_to_be_acknowledged
]
self.transaction(
self._transaction_ack_crash,
crash_id_to_be_acknowledged,
acknowledgement_token
)
del self.acknowledgement_token_cache[
crash_id_to_be_acknowledged
]
except KeyError:
self.config.logger.warning(
'RabbitMQCrashStorage tried to acknowledge crash %s'
', which was not in the cache',
crash_id_to_be_acknowledged,
exc_info=True
)
except Exception as x:
self.config.logger.error(
'RabbitMQCrashStorage unexpected failure on %s',
crash_id_to_be_acknowledged,
exc_info=True
)
except Empty:
pass # nothing to do with an empty queue
#--------------------------------------------------------------------------
def _transaction_ack_crash(
self,
connection,
crash_id,
acknowledgement_token
):
connection.channel.basic_ack(
delivery_tag=acknowledgement_token.delivery_tag
)
self.config.logger.debug(
'RabbitMQCrashStorage acking %s with delivery_tag %s',
crash_id,
acknowledgement_token.delivery_tag
)
#==============================================================================
class ReprocessingRabbitMQCrashStore(RabbitMQCrashStorage):
RabbitMQCrashStorage.required_config.routing_key.set_default(
'socorro.reprocessing',
force=True
)
    # reprocessing should queue every crash, so disable the legacy throttling
    # filter rather than pointing the boolean toggle at a routing key string
    RabbitMQCrashStorage.required_config.filter_on_legacy_processing \
        .set_default(False, force=True)
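#==============================================================================
# Minimal usage sketch (not part of the original module). It assumes a fully
# resolved configman `config` carrying the options declared above, and a
# hypothetical `process()` consumer; the config plumbing itself is omitted.
#
#     storage = RabbitMQCrashStorage(config)
#     storage.save_raw_crash(raw_crash, dumps=None, crash_id=crash_id)
#     for crash_id in storage.new_crashes():
#         process(crash_id)            # consumer does its work
#         storage.ack_crash(crash_id)  # queue the ack; it is delivered to
#                                      # RabbitMQ on the next new_crashes() pass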
|
cliqz/socorro
|
socorro/external/rabbitmq/crashstorage.py
|
Python
|
mpl-2.0
| 11,842
|
import tensorflow as tf
from tensorflow import keras
import deepx.config
deepx.config.set_backend("tensorflow")
from deepx import keras as nn
from deepx.backend import T
T.set_default_device(T.gpu())
# input image dimensions
img_rows, img_cols = 28, 28
num_classes = 10
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
inputs = keras.Input((28, 28, 1))
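# deepx layers compose with the '>>' operator; the chained expression below
# yields a single callable network that is applied to `inputs` further down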
network = (
nn.Conv2D(64, (5, 5), padding='same') >> nn.ReLU() >> nn.MaxPooling2D(padding='same')
>> nn.Conv2D(64, (5, 5), padding='same') >> nn.ReLU() >> nn.MaxPooling2D(padding='same')
>> nn.Flatten() >> nn.Dense(1024)
>> nn.ReLU() >> nn.Dense(10) >> nn.Softmax()
)
outputs = network(inputs)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=128,
epochs=10)
test_scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', test_scores[0])
print('Test accuracy:', test_scores[1])
|
sharadmv/deepx
|
examples/tensorflow/keras_cnn.py
|
Python
|
mit
| 1,872
|
# -*- coding: utf-8 -*-
import requests
class TuLing(object):
SUB_TYPE_TEXT = 100000
def __init__(self, *args, **kwargs):
super(TuLing, self).__init__()
self._api_key = kwargs.get('api_key')
self._api_secret = kwargs.get('api_secret')
self._api_url = kwargs.get('api_url')
def _post(self, data):
response = requests.post(url=self._api_url, data=data)
        if response.status_code != 200:
return None
return response.json()
def replay_text(self, info):
data = {
'key': self._api_key,
'info': info
}
return_dict = self._post(data)
        # _post returns None on a non-200 response; guard before indexing
        if return_dict is None or return_dict['code'] != TuLing.SUB_TYPE_TEXT:
            return None
message = return_dict['text']
return message.encode('utf-8')
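# A minimal usage sketch (not part of the original module); the key and URL
# below are placeholders rather than real credentials or a verified endpoint.
if __name__ == '__main__':
    bot = TuLing(api_key='<your-api-key>', api_url='<tuling-api-url>')
    print(bot.replay_text(u'hello'))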
|
ciknight/doge
|
ext/tuling/__init__.py
|
Python
|
mit
| 825
|